@inproceedings{zhang2020uwspeech,
  author    = {Zhang, Chen and Tan, Xu and Ren, Yi and Qin, Tao and Zhang, Kejun and Liu, Tie-Yan},
  title     = {{UWSpeech}: Speech to Speech Translation for Unwritten Languages},
  booktitle = {AAAI 2021},
  year      = {2020},
  month     = {December},
  abstract  = {Existing speech to speech translation systems heavily rely on the text of the target language: they usually translate the source speech either to target text and then synthesize target speech from the text, or directly to target speech with target text for auxiliary training. However, those methods cannot be applied to unwritten target languages, which have no written text or phonemes available. In this paper, we develop a translation system for unwritten languages, named UWSpeech, which converts target unwritten speech into discrete tokens with a converter, then translates source-language speech into target discrete tokens with a translator, and finally synthesizes target speech from the target discrete tokens with an inverter. We propose a method called XL-VAE, which enhances the vector quantized variational autoencoder (VQ-VAE) with cross-lingual (XL) speech recognition, to jointly train the converter and inverter of UWSpeech. Experiments on the Fisher Spanish-English conversation translation dataset show that UWSpeech outperforms the direct translation and VQ-VAE baselines by about 16 and 10 BLEU points, respectively, which demonstrates the advantages and potential of UWSpeech.},
  url       = {https://www.microsoft.com/en-us/research/publication/uwspeech-speech-to-speech-translation-for-unwritten-languages/},
}