@inproceedings{das-etal-2016-study,
    title = "A study of attention-based neural machine translation model on {I}ndian languages",
    author = "Das, Ayan and
      Yerra, Pranay and
      Kumar, Ken and
      Sarkar, Sudeshna",
    editor = "Wu, Dekai and
      Bhattacharyya, Pushpak",
    booktitle = "Proceedings of the 6th Workshop on South and Southeast {A}sian Natural Language Processing ({WSSANLP}2016)",
    month = dec,
    year = "2016",
    address = "Osaka, Japan",
    publisher = "The COLING 2016 Organizing Committee",
    url = "https://aclanthology.org/W16-3717",
    pages = "163--172",
    abstract = "Neural machine translation (NMT) models have recently been shown to be very successful in machine translation (MT). The use of LSTMs in machine translation has significantly improved translation performance for longer sentences by capturing the context and long-range correlations of the sentences in their hidden layers. The attention-based NMT system (Bahdanau et al., 2014) has become the state of the art, performing equal to or better than other statistical MT approaches. In this paper, we study the performance of the attention-based NMT system (Bahdanau et al., 2014) on the Indian language pair Hindi and Bengali, and analyze the types of errors that occur when the languages are morphologically rich and large parallel training corpora are scarce. We then carry out post-processing heuristic steps to improve the quality of the translated sentences and suggest further measures that could be taken.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="das-etal-2016-study">
    <titleInfo>
      <title>A study of attention-based neural machine translation model on Indian languages</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Ayan</namePart>
      <namePart type="family">Das</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Pranay</namePart>
      <namePart type="family">Yerra</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ken</namePart>
      <namePart type="family">Kumar</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Sudeshna</namePart>
      <namePart type="family">Sarkar</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2016-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 6th Workshop on South and Southeast Asian Natural Language Processing (WSSANLP2016)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Dekai</namePart>
        <namePart type="family">Wu</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Pushpak</namePart>
        <namePart type="family">Bhattacharyya</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>The COLING 2016 Organizing Committee</publisher>
        <place>
          <placeTerm type="text">Osaka, Japan</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Neural machine translation (NMT) models have recently been shown to be very successful in machine translation (MT). The use of LSTMs in machine translation has significantly improved translation performance for longer sentences by capturing the context and long-range correlations of the sentences in their hidden layers. The attention-based NMT system (Bahdanau et al., 2014) has become the state of the art, performing equal to or better than other statistical MT approaches. In this paper, we study the performance of the attention-based NMT system (Bahdanau et al., 2014) on the Indian language pair Hindi and Bengali, and analyze the types of errors that occur when the languages are morphologically rich and large parallel training corpora are scarce. We then carry out post-processing heuristic steps to improve the quality of the translated sentences and suggest further measures that could be taken.</abstract>
    <identifier type="citekey">das-etal-2016-study</identifier>
    <location>
      <url>https://aclanthology.org/W16-3717</url>
    </location>
    <part>
      <date>2016-12</date>
      <extent unit="page">
        <start>163</start>
        <end>172</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T A study of attention-based neural machine translation model on Indian languages
%A Das, Ayan
%A Yerra, Pranay
%A Kumar, Ken
%A Sarkar, Sudeshna
%Y Wu, Dekai
%Y Bhattacharyya, Pushpak
%S Proceedings of the 6th Workshop on South and Southeast Asian Natural Language Processing (WSSANLP2016)
%D 2016
%8 December
%I The COLING 2016 Organizing Committee
%C Osaka, Japan
%F das-etal-2016-study
%X Neural machine translation (NMT) models have recently been shown to be very successful in machine translation (MT). The use of LSTMs in machine translation has significantly improved translation performance for longer sentences by capturing the context and long-range correlations of the sentences in their hidden layers. The attention-based NMT system (Bahdanau et al., 2014) has become the state of the art, performing equal to or better than other statistical MT approaches. In this paper, we study the performance of the attention-based NMT system (Bahdanau et al., 2014) on the Indian language pair Hindi and Bengali, and analyze the types of errors that occur when the languages are morphologically rich and large parallel training corpora are scarce. We then carry out post-processing heuristic steps to improve the quality of the translated sentences and suggest further measures that could be taken.
%U https://aclanthology.org/W16-3717
%P 163-172
Markdown (Informal)
[A study of attention-based neural machine translation model on Indian languages](https://aclanthology.org/W16-3717) (Das et al., WSSANLP 2016)
ACL
Ayan Das, Pranay Yerra, Ken Kumar, and Sudeshna Sarkar. 2016. A study of attention-based neural machine translation model on Indian languages. In Proceedings of the 6th Workshop on South and Southeast Asian Natural Language Processing (WSSANLP2016), pages 163–172, Osaka, Japan. The COLING 2016 Organizing Committee.