@inproceedings{xiaoning-etal-2021-low,
    title     = {Low-Resource Machine Translation based on Asynchronous Dynamic Programming},
    author    = {Xiaoning, Jia and
      Hongxu, Hou and
      Nier, Wu and
      Haoran, Li and
      Xin, Chang},
    editor    = {Li, Sheng and
      Sun, Maosong and
      Liu, Yang and
      Wu, Hua and
      Liu, Kang and
      Che, Wanxiang and
      He, Shizhu and
      Rao, Gaoqi},
    booktitle = {Proceedings of the 20th Chinese National Conference on Computational Linguistics},
    month     = aug,
    year      = {2021},
    address   = {Huhhot, China},
    publisher = {Chinese Information Processing Society of China},
    url       = {https://aclanthology.org/2021.ccl-1.79},
    pages     = {886--894},
    abstract  = {Reinforcement learning has been proved to be effective in handling low resource machine translation tasks and different sampling methods of reinforcement learning affect the performance of the model. The reward for generating translation is determined by the scalability and iteration of the sampling strategy so it is difficult for the model to achieve bias-variance trade-off. Therefore according to the poor ability of the model to analyze the structure of the sequence in low-resource tasks this paper proposes a neural machine translation model parameter optimization method for asynchronous dynamic programming training strategies. In view of the experience priority situation under the current strategy each selective sampling experience not only improves the value of the experience state but also avoids the high computational resource consumption inherent in traditional valuation methods (such as dynamic programming). We verify the Mongolian-Chinese and Uyghur-Chinese tasks on CCMT2019. The result shows that our method has improved the quality of low-resource neural machine translation model compared with general reinforcement learning methods which fully demonstrates the effectiveness of our method.},
    language  = {English},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="xiaoning-etal-2021-low">
<titleInfo>
<title>Low-Resource Machine Translation based on Asynchronous Dynamic Programming</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jia</namePart>
<namePart type="family">Xiaoning</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hou</namePart>
<namePart type="family">Hongxu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wu</namePart>
<namePart type="family">Nier</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Li</namePart>
<namePart type="family">Haoran</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chang</namePart>
<namePart type="family">Xin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<language>
<languageTerm type="text">English</languageTerm>
<languageTerm type="code" authority="iso639-2b">eng</languageTerm>
</language>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 20th Chinese National Conference on Computational Linguistics</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sheng</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Maosong</namePart>
<namePart type="family">Sun</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yang</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hua</namePart>
<namePart type="family">Wu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kang</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wanxiang</namePart>
<namePart type="family">Che</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shizhu</namePart>
<namePart type="family">He</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Gaoqi</namePart>
<namePart type="family">Rao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Chinese Information Processing Society of China</publisher>
<place>
<placeTerm type="text">Huhhot, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
    <abstract>Reinforcement learning has been proved to be effective in handling low resource machine translation tasks and different sampling methods of reinforcement learning affect the performance of the model. The reward for generating translation is determined by the scalability and iteration of the sampling strategy so it is difficult for the model to achieve bias-variance trade-off. Therefore according to the poor ability of the model to analyze the structure of the sequence in low-resource tasks this paper proposes a neural machine translation model parameter optimization method for asynchronous dynamic programming training strategies. In view of the experience priority situation under the current strategy each selective sampling experience not only improves the value of the experience state but also avoids the high computational resource consumption inherent in traditional valuation methods (such as dynamic programming). We verify the Mongolian-Chinese and Uyghur-Chinese tasks on CCMT2019. The result shows that our method has improved the quality of low-resource neural machine translation model compared with general reinforcement learning methods which fully demonstrates the effectiveness of our method.</abstract>
<identifier type="citekey">xiaoning-etal-2021-low</identifier>
<location>
<url>https://aclanthology.org/2021.ccl-1.79</url>
</location>
<part>
<date>2021-08</date>
<extent unit="page">
<start>886</start>
<end>894</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Low-Resource Machine Translation based on Asynchronous Dynamic Programming
%A Xiaoning, Jia
%A Hongxu, Hou
%A Nier, Wu
%A Haoran, Li
%A Xin, Chang
%Y Li, Sheng
%Y Sun, Maosong
%Y Liu, Yang
%Y Wu, Hua
%Y Liu, Kang
%Y Che, Wanxiang
%Y He, Shizhu
%Y Rao, Gaoqi
%S Proceedings of the 20th Chinese National Conference on Computational Linguistics
%D 2021
%8 August
%I Chinese Information Processing Society of China
%C Huhhot, China
%G English
%F xiaoning-etal-2021-low
%X Reinforcement learning has been proved to be effective in handling low resource machine translation tasks and different sampling methods of reinforcement learning affect the performance of the model. The reward for generating translation is determined by the scalability and iteration of the sampling strategy so it is difficult for the model to achieve bias-variance trade-off. Therefore according to the poor ability of the model to analyze the structure of the sequence in low-resource tasks this paper proposes a neural machine translation model parameter optimization method for asynchronous dynamic programming training strategies. In view of the experience priority situation under the current strategy each selective sampling experience not only improves the value of the experience state but also avoids the high computational resource consumption inherent in traditional valuation methods (such as dynamic programming). We verify the Mongolian-Chinese and Uyghur-Chinese tasks on CCMT2019. The result shows that our method has improved the quality of low-resource neural machine translation model compared with general reinforcement learning methods which fully demonstrates the effectiveness of our method.
%U https://aclanthology.org/2021.ccl-1.79
%P 886-894
Markdown (Informal)
[Low-Resource Machine Translation based on Asynchronous Dynamic Programming](https://aclanthology.org/2021.ccl-1.79) (Xiaoning et al., CCL 2021)
ACL