@inproceedings{tamura-etal-2023-masked,
title = "Does Masked Language Model Pre-training with Artificial Data Improve Low-resource Neural Machine Translation?",
author = "Tamura, Hiroto and
Hirasawa, Tosho and
Kim, Hwichan and
Komachi, Mamoru",
editor = "Vlachos, Andreas and
Augenstein, Isabelle",
booktitle = "Findings of the Association for Computational Linguistics: EACL 2023",
month = may,
year = "2023",
address = "Dubrovnik, Croatia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.findings-eacl.166",
doi = "10.18653/v1/2023.findings-eacl.166",
pages = "2216--2225",
abstract = "Pre-training masked language models (MLMs) with artificial data has been proven beneficial for several natural language processing tasks such as natural language understanding and summarization; however, it has been less explored for neural machine translation (NMT). A previous study revealed the benefit of transfer learning for NMT in a limited setup, which differs from MLM. In this study, we prepared two kinds of artificial data and compared the translation performance of NMT when pre-trained with MLM. In addition to the random sequences, we created artificial data mimicking token frequency information from the real world. Our results showed that pre-training the models with artificial data by MLM improves translation performance in low-resource situations. Additionally, we found that pre-training on artificial data created considering token frequency information facilitates improved performance.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="tamura-etal-2023-masked">
<titleInfo>
<title>Does Masked Language Model Pre-training with Artificial Data Improve Low-resource Neural Machine Translation?</title>
</titleInfo>
<name type="personal">
<namePart type="given">Hiroto</namePart>
<namePart type="family">Tamura</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tosho</namePart>
<namePart type="family">Hirasawa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hwichan</namePart>
<namePart type="family">Kim</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mamoru</namePart>
<namePart type="family">Komachi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EACL 2023</title>
</titleInfo>
<name type="personal">
<namePart type="given">Andreas</namePart>
<namePart type="family">Vlachos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Isabelle</namePart>
<namePart type="family">Augenstein</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dubrovnik, Croatia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Pre-training masked language models (MLMs) with artificial data has been proven beneficial for several natural language processing tasks such as natural language understanding and summarization; however, it has been less explored for neural machine translation (NMT). A previous study revealed the benefit of transfer learning for NMT in a limited setup, which differs from MLM. In this study, we prepared two kinds of artificial data and compared the translation performance of NMT when pre-trained with MLM. In addition to the random sequences, we created artificial data mimicking token frequency information from the real world. Our results showed that pre-training the models with artificial data by MLM improves translation performance in low-resource situations. Additionally, we found that pre-training on artificial data created considering token frequency information facilitates improved performance.</abstract>
<identifier type="citekey">tamura-etal-2023-masked</identifier>
<identifier type="doi">10.18653/v1/2023.findings-eacl.166</identifier>
<location>
<url>https://aclanthology.org/2023.findings-eacl.166</url>
</location>
<part>
<date>2023-05</date>
<extent unit="page">
<start>2216</start>
<end>2225</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Does Masked Language Model Pre-training with Artificial Data Improve Low-resource Neural Machine Translation?
%A Tamura, Hiroto
%A Hirasawa, Tosho
%A Kim, Hwichan
%A Komachi, Mamoru
%Y Vlachos, Andreas
%Y Augenstein, Isabelle
%S Findings of the Association for Computational Linguistics: EACL 2023
%D 2023
%8 May
%I Association for Computational Linguistics
%C Dubrovnik, Croatia
%F tamura-etal-2023-masked
%X Pre-training masked language models (MLMs) with artificial data has been proven beneficial for several natural language processing tasks such as natural language understanding and summarization; however, it has been less explored for neural machine translation (NMT). A previous study revealed the benefit of transfer learning for NMT in a limited setup, which differs from MLM. In this study, we prepared two kinds of artificial data and compared the translation performance of NMT when pre-trained with MLM. In addition to the random sequences, we created artificial data mimicking token frequency information from the real world. Our results showed that pre-training the models with artificial data by MLM improves translation performance in low-resource situations. Additionally, we found that pre-training on artificial data created considering token frequency information facilitates improved performance.
%R 10.18653/v1/2023.findings-eacl.166
%U https://aclanthology.org/2023.findings-eacl.166
%U https://doi.org/10.18653/v1/2023.findings-eacl.166
%P 2216-2225
Markdown (Informal)
[Does Masked Language Model Pre-training with Artificial Data Improve Low-resource Neural Machine Translation?](https://aclanthology.org/2023.findings-eacl.166) (Tamura et al., Findings 2023)
ACL
Hiroto Tamura, Tosho Hirasawa, Hwichan Kim, and Mamoru Komachi. 2023. [Does Masked Language Model Pre-training with Artificial Data Improve Low-resource Neural Machine Translation?](https://aclanthology.org/2023.findings-eacl.166). In *Findings of the Association for Computational Linguistics: EACL 2023*, pages 2216–2225, Dubrovnik, Croatia. Association for Computational Linguistics.