@inproceedings{yang-etal-2023-kyoto,
title = "The {K}yoto Speech-to-Speech Translation System for {IWSLT} 2023",
author = "Yang, Zhengdong and
Shimizu, Shuichiro and
Zhou, Wangjin and
Li, Sheng and
Chu, Chenhui",
editor = "Salesky, Elizabeth and
Federico, Marcello and
Carpuat, Marine",
booktitle = "Proceedings of the 20th International Conference on Spoken Language Translation (IWSLT 2023)",
month = jul,
year = "2023",
address = "Toronto, Canada (in-person and online)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.iwslt-1.33",
doi = "10.18653/v1/2023.iwslt-1.33",
pages = "357--362",
    abstract = "This paper describes the Kyoto speech-to-speech translation system for IWSLT 2023. Our system is a combination of speech-to-text translation and text-to-speech synthesis. For the speech-to-text translation model, we used the dual-decoder Transformer model. For the text-to-speech synthesis model, we took a cascade approach of an acoustic model and a vocoder.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="yang-etal-2023-kyoto">
<titleInfo>
<title>The Kyoto Speech-to-Speech Translation System for IWSLT 2023</title>
</titleInfo>
<name type="personal">
<namePart type="given">Zhengdong</namePart>
<namePart type="family">Yang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shuichiro</namePart>
<namePart type="family">Shimizu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wangjin</namePart>
<namePart type="family">Zhou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sheng</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chenhui</namePart>
<namePart type="family">Chu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 20th International Conference on Spoken Language Translation (IWSLT 2023)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Elizabeth</namePart>
<namePart type="family">Salesky</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marcello</namePart>
<namePart type="family">Federico</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marine</namePart>
<namePart type="family">Carpuat</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Toronto, Canada (in-person and online)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper describes the Kyoto speech-to-speech translation system for IWSLT 2023. Our system is a combination of speech-to-text translation and text-to-speech synthesis. For the speech-to-text translation model, we used the dual-decoder Transformer model. For the text-to-speech synthesis model, we took a cascade approach of an acoustic model and a vocoder.</abstract>
<identifier type="citekey">yang-etal-2023-kyoto</identifier>
<identifier type="doi">10.18653/v1/2023.iwslt-1.33</identifier>
<location>
<url>https://aclanthology.org/2023.iwslt-1.33</url>
</location>
<part>
<date>2023-07</date>
<extent unit="page">
<start>357</start>
<end>362</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T The Kyoto Speech-to-Speech Translation System for IWSLT 2023
%A Yang, Zhengdong
%A Shimizu, Shuichiro
%A Zhou, Wangjin
%A Li, Sheng
%A Chu, Chenhui
%Y Salesky, Elizabeth
%Y Federico, Marcello
%Y Carpuat, Marine
%S Proceedings of the 20th International Conference on Spoken Language Translation (IWSLT 2023)
%D 2023
%8 July
%I Association for Computational Linguistics
%C Toronto, Canada (in-person and online)
%F yang-etal-2023-kyoto
%X This paper describes the Kyoto speech-to-speech translation system for IWSLT 2023. Our system is a combination of speech-to-text translation and text-to-speech synthesis. For the speech-to-text translation model, we used the dual-decoder Transformer model. For the text-to-speech synthesis model, we took a cascade approach of an acoustic model and a vocoder.
%R 10.18653/v1/2023.iwslt-1.33
%U https://aclanthology.org/2023.iwslt-1.33
%U https://doi.org/10.18653/v1/2023.iwslt-1.33
%P 357-362
Markdown (Informal)
[The Kyoto Speech-to-Speech Translation System for IWSLT 2023](https://aclanthology.org/2023.iwslt-1.33) (Yang et al., IWSLT 2023)
ACL
- Zhengdong Yang, Shuichiro Shimizu, Wangjin Zhou, Sheng Li, and Chenhui Chu. 2023. The Kyoto Speech-to-Speech Translation System for IWSLT 2023. In Proceedings of the 20th International Conference on Spoken Language Translation (IWSLT 2023), pages 357–362, Toronto, Canada (in-person and online). Association for Computational Linguistics.