@inproceedings{lim-etal-2024-modularized,
title = "Modularized Multilingual {NMT} with Fine-grained Interlingua",
author = "Lim, Sungjun and
Choi, Yoonjung and
Kim, Sangha",
editor = "Duh, Kevin and
Gomez, Helena and
Bethard, Steven",
booktitle = "Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.naacl-long.328",
pages = "5884--5899",
abstract = "Recently, one popular alternative in Multilingual NMT (MNMT) is modularized MNMT that has both language-specific encoders and decoders. However, due to the absence of layer-sharing, the modularized MNMT failed to produce satisfactory language-independent (Interlingua) features, leading to performance degradation in zero-shot translation. To address this issue, a solution was proposed to share the top of language-specific encoder layers, enabling the successful generation of interlingua features. Nonetheless, it should be noted that this sharing structure does not guarantee the explicit propagation of language-specific features to their respective language-specific decoders. Consequently, to overcome this challenge, we present our modularized MNMT approach, where a modularized encoder is divided into three distinct encoder modules based on different sharing criteria: (1) source language-specific ($Enc_{s}$); (2) universal ($Enc_{all}$); (3) target language-specific ($Enc_{t}$). By employing these sharing strategies, $Enc_{all}$ propagates the interlingua features, after which $Enc_{t}$ propagates the target language-specific features to the language-specific decoders. Additionally, we suggest the Denoising Bi-path Autoencoder (DBAE) to fortify the Denoising Autoencoder (DAE) by leveraging $Enc_{t}$. For experimental purposes, our training corpus comprises both En-to-Any and Any-to-En directions. We adjust the size of our corpus to simulate both balanced and unbalanced settings. Our method demonstrates an improved average BLEU score by ''+2.90{''} in En-to-Any directions and by ''+3.06{''} in zero-shot compared to other MNMT baselines.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="lim-etal-2024-modularized">
<titleInfo>
<title>Modularized Multilingual NMT with Fine-grained Interlingua</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sungjun</namePart>
<namePart type="family">Lim</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yoonjung</namePart>
<namePart type="family">Choi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sangha</namePart>
<namePart type="family">Kim</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kevin</namePart>
<namePart type="family">Duh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Helena</namePart>
<namePart type="family">Gomez</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Steven</namePart>
<namePart type="family">Bethard</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Mexico City, Mexico</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>A recently popular alternative in Multilingual NMT (MNMT) is modularized MNMT, which has both language-specific encoders and decoders. However, due to the absence of layer sharing, modularized MNMT fails to produce satisfactory language-independent (interlingua) features, leading to performance degradation in zero-shot translation. To address this issue, prior work proposed sharing the top language-specific encoder layers, enabling the successful generation of interlingua features. Nonetheless, this sharing structure does not guarantee the explicit propagation of language-specific features to their respective language-specific decoders. To overcome this challenge, we present a modularized MNMT approach in which the modularized encoder is divided into three distinct encoder modules based on different sharing criteria: (1) source language-specific (Enc_s); (2) universal (Enc_all); (3) target language-specific (Enc_t). Under this sharing strategy, Enc_all propagates the interlingua features, after which Enc_t propagates the target language-specific features to the language-specific decoders. Additionally, we propose the Denoising Bi-path Autoencoder (DBAE) to fortify the Denoising Autoencoder (DAE) by leveraging Enc_t. Our training corpus comprises both En-to-Any and Any-to-En directions, and we adjust its size to simulate both balanced and unbalanced settings. Our method improves the average BLEU score by +2.90 in En-to-Any directions and by +3.06 in zero-shot translation compared to other MNMT baselines.</abstract>
<identifier type="citekey">lim-etal-2024-modularized</identifier>
<location>
<url>https://aclanthology.org/2024.naacl-long.328</url>
</location>
<part>
<date>2024-06</date>
<extent unit="page">
<start>5884</start>
<end>5899</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Modularized Multilingual NMT with Fine-grained Interlingua
%A Lim, Sungjun
%A Choi, Yoonjung
%A Kim, Sangha
%Y Duh, Kevin
%Y Gomez, Helena
%Y Bethard, Steven
%S Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)
%D 2024
%8 June
%I Association for Computational Linguistics
%C Mexico City, Mexico
%F lim-etal-2024-modularized
%X A recently popular alternative in Multilingual NMT (MNMT) is modularized MNMT, which has both language-specific encoders and decoders. However, due to the absence of layer sharing, modularized MNMT fails to produce satisfactory language-independent (interlingua) features, leading to performance degradation in zero-shot translation. To address this issue, prior work proposed sharing the top language-specific encoder layers, enabling the successful generation of interlingua features. Nonetheless, this sharing structure does not guarantee the explicit propagation of language-specific features to their respective language-specific decoders. To overcome this challenge, we present a modularized MNMT approach in which the modularized encoder is divided into three distinct encoder modules based on different sharing criteria: (1) source language-specific (Enc_s); (2) universal (Enc_all); (3) target language-specific (Enc_t). Under this sharing strategy, Enc_all propagates the interlingua features, after which Enc_t propagates the target language-specific features to the language-specific decoders. Additionally, we propose the Denoising Bi-path Autoencoder (DBAE) to fortify the Denoising Autoencoder (DAE) by leveraging Enc_t. Our training corpus comprises both En-to-Any and Any-to-En directions, and we adjust its size to simulate both balanced and unbalanced settings. Our method improves the average BLEU score by +2.90 in En-to-Any directions and by +3.06 in zero-shot translation compared to other MNMT baselines.
%U https://aclanthology.org/2024.naacl-long.328
%P 5884-5899
Markdown (Informal)
[Modularized Multilingual NMT with Fine-grained Interlingua](https://aclanthology.org/2024.naacl-long.328) (Lim et al., NAACL 2024)
ACL
- Sungjun Lim, Yoonjung Choi, and Sangha Kim. 2024. Modularized Multilingual NMT with Fine-grained Interlingua. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 5884–5899, Mexico City, Mexico. Association for Computational Linguistics.
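
For readers skimming this record, the abstract's three-module encoder can be made concrete with a short sketch. This is a hypothetical illustration, not the authors' implementation: the module names Enc_s, Enc_all, and Enc_t come from the abstract, while the PyTorch framing, layer counts, and dimensions are assumptions chosen for brevity.

# Hypothetical sketch of the three-module encoder described in the abstract
# (Enc_s -> Enc_all -> Enc_t). Module names follow the abstract; layer counts,
# dimensions, and the PyTorch framing are illustrative assumptions only.
import torch
import torch.nn as nn

def make_stack(num_layers, d_model=512, nhead=8):
    # A plain Transformer encoder stack; nn.TransformerEncoder deep-copies
    # the layer, so each stack gets its own parameters.
    layer = nn.TransformerEncoderLayer(d_model, nhead, batch_first=True)
    return nn.TransformerEncoder(layer, num_layers=num_layers)

class ModularizedEncoder(nn.Module):
    def __init__(self, langs, n_specific=2, n_shared=2, d_model=512):
        super().__init__()
        # (1) source language-specific modules, one stack per source language
        self.enc_s = nn.ModuleDict({l: make_stack(n_specific, d_model) for l in langs})
        # (2) universal module shared across all directions; its output plays
        #     the role of the language-independent (interlingua) features
        self.enc_all = make_stack(n_shared, d_model)
        # (3) target language-specific modules, one stack per target language,
        #     passing target-specific features on to the matching decoder
        self.enc_t = nn.ModuleDict({l: make_stack(n_specific, d_model) for l in langs})

    def forward(self, x, src_lang, tgt_lang):
        h = self.enc_s[src_lang](x)     # source-specific encoding
        h = self.enc_all(h)             # interlingua features
        return self.enc_t[tgt_lang](h)  # target-specific features for the decoder

# Usage: route an En->De batch through the En source stack and the De target stack.
encoder = ModularizedEncoder(langs=["en", "de", "fr"])
x = torch.randn(4, 16, 512)  # (batch, seq_len, d_model) token embeddings
features = encoder(x, src_lang="en", tgt_lang="de")

The routing in forward() is the point of the sketch: only enc_all is shared by every translation direction, so zero-shot pairs still pass through a common interlingua bottleneck while each decoder receives features shaped by its own target-specific stack.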