@inproceedings{lu-zhang-2024-improving,
title = "Improving Unsupervised Neural Machine Translation via Training Data Self-Correction",
author = "Lu, Jinliang and
Zhang, Jiajun",
editor = "Calzolari, Nicoletta and
Kan, Min-Yen and
Hoste, Veronique and
Lenci, Alessandro and
Sakti, Sakriani and
Xue, Nianwen",
booktitle = "Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)",
month = may,
year = "2024",
address = "Torino, Italia",
publisher = "ELRA and ICCL",
url = "https://aclanthology.org/2024.lrec-main.783/",
pages = "8942--8954",
abstract = "Unsupervised neural machine translation (UNMT) models are trained with pseudo-parallel sentences constructed by on-the-fly back-translation using monolingual corpora. However, the quality of pseudo-parallel sentences cannot be guaranteed, which hinders the final performance of UNMT. This paper demonstrates that although UNMT usually generates mistakes during pseudo-parallel data construction, some of them can be corrected by the token-level translations that exist in the embedding table. Therefore, we propose a self-correction method to automatically improve the quality of pseudo-parallel sentences during training, thereby enhancing translation performance. Specifically, for a pseudo sentence pair, our self-correction method first estimates the alignment relations between tokens by treating and solving it as an optimal transport problem. Then, we measure the translation reliability for each token and detect the mis-translated ones. Finally, the mis-translated tokens are corrected with real-time computed token-by-token translations based on the embedding table, yielding a better training example. Considering that the modified examples are semantically equivalent to the original ones when UNMT converges, we introduce second-phase training to strengthen the output consistency between them, further improving the generalization capability and translation performance. Empirical results on widely used UNMT datasets demonstrate the effectiveness of our method and it significantly outperforms several strong baselines."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="lu-zhang-2024-improving">
<titleInfo>
<title>Improving Unsupervised Neural Machine Translation via Training Data Self-Correction</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jinliang</namePart>
<namePart type="family">Lu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jiajun</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Nicoletta</namePart>
<namePart type="family">Calzolari</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Min-Yen</namePart>
<namePart type="family">Kan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Veronique</namePart>
<namePart type="family">Hoste</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alessandro</namePart>
<namePart type="family">Lenci</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sakriani</namePart>
<namePart type="family">Sakti</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nianwen</namePart>
<namePart type="family">Xue</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>ELRA and ICCL</publisher>
<place>
<placeTerm type="text">Torino, Italia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Unsupervised neural machine translation (UNMT) models are trained with pseudo-parallel sentences constructed by on-the-fly back-translation using monolingual corpora. However, the quality of pseudo-parallel sentences cannot be guaranteed, which hinders the final performance of UNMT. This paper demonstrates that although UNMT usually generates mistakes during pseudo-parallel data construction, some of them can be corrected by the token-level translations that exist in the embedding table. Therefore, we propose a self-correction method to automatically improve the quality of pseudo-parallel sentences during training, thereby enhancing translation performance. Specifically, for a pseudo sentence pair, our self-correction method first estimates the alignment relations between tokens by treating and solving it as an optimal transport problem. Then, we measure the translation reliability for each token and detect the mis-translated ones. Finally, the mis-translated tokens are corrected with real-time computed token-by-token translations based on the embedding table, yielding a better training example. Considering that the modified examples are semantically equivalent to the original ones when UNMT converges, we introduce second-phase training to strengthen the output consistency between them, further improving the generalization capability and translation performance. Empirical results on widely used UNMT datasets demonstrate the effectiveness of our method and it significantly outperforms several strong baselines.</abstract>
<identifier type="citekey">lu-zhang-2024-improving</identifier>
<location>
<url>https://aclanthology.org/2024.lrec-main.783/</url>
</location>
<part>
<date>2024-05</date>
<extent unit="page">
<start>8942</start>
<end>8954</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Improving Unsupervised Neural Machine Translation via Training Data Self-Correction
%A Lu, Jinliang
%A Zhang, Jiajun
%Y Calzolari, Nicoletta
%Y Kan, Min-Yen
%Y Hoste, Veronique
%Y Lenci, Alessandro
%Y Sakti, Sakriani
%Y Xue, Nianwen
%S Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)
%D 2024
%8 May
%I ELRA and ICCL
%C Torino, Italia
%F lu-zhang-2024-improving
%X Unsupervised neural machine translation (UNMT) models are trained with pseudo-parallel sentences constructed by on-the-fly back-translation using monolingual corpora. However, the quality of pseudo-parallel sentences cannot be guaranteed, which hinders the final performance of UNMT. This paper demonstrates that although UNMT usually generates mistakes during pseudo-parallel data construction, some of them can be corrected by the token-level translations that exist in the embedding table. Therefore, we propose a self-correction method to automatically improve the quality of pseudo-parallel sentences during training, thereby enhancing translation performance. Specifically, for a pseudo sentence pair, our self-correction method first estimates the alignment relations between tokens by treating and solving it as an optimal transport problem. Then, we measure the translation reliability for each token and detect the mis-translated ones. Finally, the mis-translated tokens are corrected with real-time computed token-by-token translations based on the embedding table, yielding a better training example. Considering that the modified examples are semantically equivalent to the original ones when UNMT converges, we introduce second-phase training to strengthen the output consistency between them, further improving the generalization capability and translation performance. Empirical results on widely used UNMT datasets demonstrate the effectiveness of our method and it significantly outperforms several strong baselines.
%U https://aclanthology.org/2024.lrec-main.783/
%P 8942-8954
Markdown (Informal)
[Improving Unsupervised Neural Machine Translation via Training Data Self-Correction](https://aclanthology.org/2024.lrec-main.783/) (Lu & Zhang, LREC-COLING 2024)
ACL
Jinliang Lu and Jiajun Zhang. 2024. Improving Unsupervised Neural Machine Translation via Training Data Self-Correction. In Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 8942–8954, Torino, Italia. ELRA and ICCL.
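
The abstract outlines a three-step correction procedure: align the tokens of a pseudo-parallel pair by solving an optimal transport problem, flag target tokens whose alignment reliability is low, and replace them with token-by-token translations retrieved from the shared embedding table. The sketch below is a minimal, self-contained NumPy illustration of that idea; the Sinkhorn solver, the cosine-based reliability score, the threshold `tau`, and all function names are assumptions made for illustration, not the authors' implementation.

```python
# Hypothetical sketch of the correction step described in the abstract:
# (1) soft-align source and target tokens via entropy-regularized optimal
#     transport over embedding similarities,
# (2) flag target tokens with low alignment reliability, and
# (3) replace them with nearest-neighbour translations from the shared
#     embedding table. Not the paper's implementation.
import numpy as np


def sinkhorn_plan(cost, reg=0.05, n_iter=200):
    """Entropy-regularized OT plan between uniform marginals (Sinkhorn)."""
    n, m = cost.shape
    K = np.exp(-cost / reg)
    a, b = np.full(n, 1.0 / n), np.full(m, 1.0 / m)
    u, v = np.ones(n), np.ones(m)
    for _ in range(n_iter):
        u = a / (K @ v)
        v = b / (K.T @ u)
    return u[:, None] * K * v[None, :]  # (n, m) transport plan


def correct_pseudo_target(src_emb, tgt_emb, tgt_tokens, emb_table, vocab, tau=0.4):
    """Return target tokens with unreliable ones replaced by embedding-table translations.

    src_emb   : (n, d) embeddings of the source-side tokens
    tgt_emb   : (m, d) embeddings of the back-translated target-side tokens
    emb_table : (V, d) shared embedding table used for token-by-token translation
    vocab     : list of V token strings, row-aligned with emb_table
    """
    norm = lambda x: x / np.linalg.norm(x, axis=1, keepdims=True)
    S, T, E = norm(src_emb), norm(tgt_emb), norm(emb_table)

    cost = 1.0 - S @ T.T            # cosine distance between all token pairs
    plan = sinkhorn_plan(cost)      # soft token alignments
    best_src = plan.argmax(axis=0)  # most-aligned source token per target token

    corrected = list(tgt_tokens)
    for j, i in enumerate(best_src):
        reliability = float(S[i] @ T[j])  # similarity to the aligned source token
        if reliability < tau:             # treat this token as mis-translated
            # Token-by-token translation: nearest neighbour of the aligned source
            # token in the embedding table (in practice the search would be
            # restricted to target-language entries of the shared vocabulary).
            corrected[j] = vocab[int(np.argmax(E @ S[i]))]
    return corrected
```

The same corrected pair could then serve as the second training view mentioned in the abstract, with a consistency objective tying the model's outputs on the original and corrected examples together during second-phase training.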