@inproceedings{guo-etal-2014-tunable,
  title     = {A tunable language model for statistical machine translation},
  author    = {Guo, Junfei and
               Liu, Juan and
               Han, Qi and
               Maletti, Andreas},
  booktitle = {Proceedings of the 11th Conference of the Association for Machine Translation in the Americas: MT Researchers Track},
  month     = oct # {~22--26},
  year      = {2014},
  address   = {Vancouver, Canada},
  publisher = {Association for Machine Translation in the Americas},
  url       = {https://aclanthology.org/2014.amta-researchers.27},
  pages     = {356--368},
  abstract  = {A novel variation of modified KNESER-NEY model using monomial discounting is presented and integrated into the MOSES statistical machine translation toolkit. The language model is trained on a large training set as usual, but its new discount parameters are tuned to the small development set. An in-domain and cross-domain evaluation of the language model is performed based on perplexity, in which sizable improvements are obtained. Additionally, the performance of the language model is also evaluated in several major machine translation tasks including Chinese-to-English. In those tests, the test data is from a (slightly) different domain than the training data. The experimental results indicate that the new model significantly outperforms a baseline model using SRILM in those domain adaptation scenarios. The new language model is thus ideally suited for domain adaptation without sacrificing performance on in-domain experiments.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="guo-etal-2014-tunable">
<titleInfo>
<title>A tunable language model for statistical machine translation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Junfei</namePart>
<namePart type="family">Guo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Juan</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Qi</namePart>
<namePart type="family">Han</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Andreas</namePart>
<namePart type="family">Maletti</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>October 22-26, 2014</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 11th Conference of the Association for Machine Translation in the Americas: MT Researchers Track</title>
</titleInfo>
<originInfo>
<publisher>Association for Machine Translation in the Americas</publisher>
<place>
<placeTerm type="text">Vancouver, Canada</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>A novel variation of modified KNESER-NEY model using monomial discounting is presented and integrated into the MOSES statistical machine translation toolkit. The language model is trained on a large training set as usual, but its new discount parameters are tuned to the small development set. An in-domain and cross-domain evaluation of the language model is performed based on perplexity, in which sizable improvements are obtained. Additionally, the performance of the language model is also evaluated in several major machine translation tasks including Chinese-to-English. In those tests, the test data is from a (slightly) different domain than the training data. The experimental results indicate that the new model significantly outperforms a baseline model using SRILM in those domain adaptation scenarios. The new language model is thus ideally suited for domain adaptation without sacrificing performance on in-domain experiments.</abstract>
<identifier type="citekey">guo-etal-2014-tunable</identifier>
<location>
<url>https://aclanthology.org/2014.amta-researchers.27</url>
</location>
<part>
<date>October 22-26, 2014</date>
<extent unit="page">
<start>356</start>
<end>368</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T A tunable language model for statistical machine translation
%A Guo, Junfei
%A Liu, Juan
%A Han, Qi
%A Maletti, Andreas
%S Proceedings of the 11th Conference of the Association for Machine Translation in the Americas: MT Researchers Track
%D 2014
%8 October 22-26
%I Association for Machine Translation in the Americas
%C Vancouver, Canada
%F guo-etal-2014-tunable
%X A novel variation of modified KNESER-NEY model using monomial discounting is presented and integrated into the MOSES statistical machine translation toolkit. The language model is trained on a large training set as usual, but its new discount parameters are tuned to the small development set. An in-domain and cross-domain evaluation of the language model is performed based on perplexity, in which sizable improvements are obtained. Additionally, the performance of the language model is also evaluated in several major machine translation tasks including Chinese-to-English. In those tests, the test data is from a (slightly) different domain than the training data. The experimental results indicate that the new model significantly outperforms a baseline model using SRILM in those domain adaptation scenarios. The new language model is thus ideally suited for domain adaptation without sacrificing performance on in-domain experiments.
%U https://aclanthology.org/2014.amta-researchers.27
%P 356-368
Markdown (Informal)
[A tunable language model for statistical machine translation](https://aclanthology.org/2014.amta-researchers.27) (Guo et al., AMTA 2014)
ACL
- Junfei Guo, Juan Liu, Qi Han, and Andreas Maletti. 2014. A tunable language model for statistical machine translation. In Proceedings of the 11th Conference of the Association for Machine Translation in the Americas: MT Researchers Track, pages 356–368, Vancouver, Canada. Association for Machine Translation in the Americas.