BibTeX
@inproceedings{rikters-2016-neural,
title = "Neural Network Language Models for Candidate Scoring in Hybrid Multi-System Machine Translation",
author = "Rikters, Mat{\=\i}ss",
editor = "Lambert, Patrik and
Babych, Bogdan and
Eberle, Kurt and
Banchs, Rafael E. and
Rapp, Reinhard and
Costa-juss{\`a}, Marta R.",
booktitle = "Proceedings of the Sixth Workshop on Hybrid Approaches to Translation ({H}y{T}ra6)",
month = dec,
year = "2016",
address = "Osaka, Japan",
publisher = "The COLING 2016 Organizing Committee",
url = "https://aclanthology.org/W16-4502",
pages = "8--15",
abstract = "This paper presents the comparison of how using different neural network based language modeling tools for selecting the best candidate fragments affects the final output translation quality in a hybrid multi-system machine translation setup. Experiments were conducted by comparing perplexity and BLEU scores on common test cases using the same training data set. A 12-gram statistical language model was selected as a baseline to oppose three neural network based models of different characteristics. The models were integrated in a hybrid system that depends on the perplexity score of a sentence fragment to produce the best fitting translations. The results show a correlation between language model perplexity and BLEU scores as well as overall improvements in BLEU.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="rikters-2016-neural">
  <titleInfo>
    <title>Neural Network Language Models for Candidate Scoring in Hybrid Multi-System Machine Translation</title>
  </titleInfo>
  <name type="personal">
    <namePart type="given">Matīss</namePart>
    <namePart type="family">Rikters</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <originInfo>
    <dateIssued>2016-12</dateIssued>
  </originInfo>
  <typeOfResource>text</typeOfResource>
  <relatedItem type="host">
    <titleInfo>
      <title>Proceedings of the Sixth Workshop on Hybrid Approaches to Translation (HyTra6)</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Patrik</namePart>
      <namePart type="family">Lambert</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Bogdan</namePart>
      <namePart type="family">Babych</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Kurt</namePart>
      <namePart type="family">Eberle</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Rafael</namePart>
      <namePart type="given">E</namePart>
      <namePart type="family">Banchs</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Reinhard</namePart>
      <namePart type="family">Rapp</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Marta</namePart>
      <namePart type="given">R</namePart>
      <namePart type="family">Costa-jussà</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <originInfo>
      <publisher>The COLING 2016 Organizing Committee</publisher>
      <place>
        <placeTerm type="text">Osaka, Japan</placeTerm>
      </place>
    </originInfo>
    <genre authority="marcgt">conference publication</genre>
  </relatedItem>
  <abstract>This paper presents the comparison of how using different neural network based language modeling tools for selecting the best candidate fragments affects the final output translation quality in a hybrid multi-system machine translation setup. Experiments were conducted by comparing perplexity and BLEU scores on common test cases using the same training data set. A 12-gram statistical language model was selected as a baseline to oppose three neural network based models of different characteristics. The models were integrated in a hybrid system that depends on the perplexity score of a sentence fragment to produce the best fitting translations. The results show a correlation between language model perplexity and BLEU scores as well as overall improvements in BLEU.</abstract>
  <identifier type="citekey">rikters-2016-neural</identifier>
  <location>
    <url>https://aclanthology.org/W16-4502</url>
  </location>
  <part>
    <date>2016-12</date>
    <extent unit="page">
      <start>8</start>
      <end>15</end>
    </extent>
  </part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Neural Network Language Models for Candidate Scoring in Hybrid Multi-System Machine Translation
%A Rikters, Matīss
%Y Lambert, Patrik
%Y Babych, Bogdan
%Y Eberle, Kurt
%Y Banchs, Rafael E.
%Y Rapp, Reinhard
%Y Costa-jussà, Marta R.
%S Proceedings of the Sixth Workshop on Hybrid Approaches to Translation (HyTra6)
%D 2016
%8 December
%I The COLING 2016 Organizing Committee
%C Osaka, Japan
%F rikters-2016-neural
%X This paper presents the comparison of how using different neural network based language modeling tools for selecting the best candidate fragments affects the final output translation quality in a hybrid multi-system machine translation setup. Experiments were conducted by comparing perplexity and BLEU scores on common test cases using the same training data set. A 12-gram statistical language model was selected as a baseline to oppose three neural network based models of different characteristics. The models were integrated in a hybrid system that depends on the perplexity score of a sentence fragment to produce the best fitting translations. The results show a correlation between language model perplexity and BLEU scores as well as overall improvements in BLEU.
%U https://aclanthology.org/W16-4502
%P 8-15
Markdown (Informal)
[Neural Network Language Models for Candidate Scoring in Hybrid Multi-System Machine Translation](https://aclanthology.org/W16-4502) (Rikters, HyTra 2016)
ACL
Matīss Rikters. 2016. Neural Network Language Models for Candidate Scoring in Hybrid Multi-System Machine Translation. In Proceedings of the Sixth Workshop on Hybrid Approaches to Translation (HyTra6), pages 8–15, Osaka, Japan. The COLING 2016 Organizing Committee.
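
The selection step described in the abstract scores each candidate translation fragment with a language model and keeps the fragment with the lowest perplexity. The following is a minimal sketch of that idea, assuming a toy add-alpha unigram model in place of the paper's 12-gram statistical and neural language models; every name and data point here is an illustrative assumption, not the authors' code or toolkits.

import math
from collections import Counter

def train_unigram_lm(corpus_tokens, alpha=1.0):
    # Toy add-alpha unigram LM; stands in for the statistical and neural
    # LMs compared in the paper (illustrative assumption, not their models).
    counts = Counter(corpus_tokens)
    total = sum(counts.values())
    vocab_size = len(counts) + 1  # reserve one slot for unseen tokens
    def log_prob(token):
        return math.log((counts.get(token, 0) + alpha) / (total + alpha * vocab_size))
    return log_prob

def perplexity(log_prob, tokens):
    # Per-token perplexity of a fragment under the language model.
    if not tokens:
        return float("inf")
    avg_log_prob = sum(log_prob(t) for t in tokens) / len(tokens)
    return math.exp(-avg_log_prob)

def select_fragments(candidate_fragments, log_prob):
    # For each source fragment, keep the system output with the lowest
    # perplexity, then join the chosen fragments into one hybrid translation.
    chosen = [min(candidates, key=lambda frag: perplexity(log_prob, frag.split()))
              for candidates in candidate_fragments]
    return " ".join(chosen)

# Hypothetical usage: two MT systems propose translations for two fragments.
lm = train_unigram_lm("the cat sat on the mat while the dog slept".split())
candidates = [["the cat sat on", "cat cat sitted on"], ["the mat", "mat floor"]]
print(select_fragments(candidates, lm))  # fragments with lower perplexity win

The fragments with out-of-vocabulary or repeated tokens receive higher perplexity under the toy model, so the smoother candidates are selected, which mirrors the fragment-level scoring the abstract describes without reproducing any specific system from the paper.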