@inproceedings{wang-etal-2021-transformer,
    title = "Transformer-Based Direct Hidden {M}arkov Model for Machine Translation",
    author = "Wang, Weiyue and
      Yang, Zijian and
      Gao, Yingbo and
      Ney, Hermann",
    editor = "Kabbara, Jad and
      Lin, Haitao and
      Paullada, Amandalynne and
      Vamvas, Jannis",
    booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing: Student Research Workshop",
    month = aug,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.acl-srw.3",
    doi = "10.18653/v1/2021.acl-srw.3",
    pages = "23--32",
abstract = "The neural hidden Markov model has been proposed as an alternative to attention mechanism in machine translation with recurrent neural networks. However, since the introduction of the transformer models, its performance has been surpassed. This work proposes to introduce the concept of the hidden Markov model to the transformer architecture, which outperforms the transformer baseline. Interestingly, we find that the zero-order model already provides promising performance, giving it an edge compared to a model with first-order dependency, which performs similarly but is significantly slower in training and decoding.",
}
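
The abstract contrasts a zero-order alignment dependency with a first-order one. For orientation, a minimal sketch of the direct hidden Markov model factorization as it is commonly written in the machine translation literature is given below; the notation (target words e_i, latent source positions b_i, source sentence f_1^J) is an assumption of this sketch and is not reproduced from the paper itself.

\[
p\bigl(e_1^I \mid f_1^J\bigr)
  = \sum_{b_1^I} \prod_{i=1}^{I}
    \underbrace{p\bigl(e_i \mid b_i, e_0^{i-1}, f_1^J\bigr)}_{\text{lexicon model}}
    \cdot
    \underbrace{p\bigl(b_i \mid b_{i-1}, e_0^{i-1}, f_1^J\bigr)}_{\text{alignment model (first-order)}}
\]

In the zero-order variant the alignment term does not condition on b_{i-1}, so the marginalization over alignments factorizes per target position and no forward-style dynamic programming over alignment transitions is required, which is consistent with the reported speed difference between the two variants in training and decoding.
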
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="wang-etal-2021-transformer">
<titleInfo>
<title>Transformer-Based Direct Hidden Markov Model for Machine Translation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Weiyue</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zijian</namePart>
<namePart type="family">Yang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yingbo</namePart>
<namePart type="family">Gao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hermann</namePart>
<namePart type="family">Ney</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing: Student Research Workshop</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jad</namePart>
<namePart type="family">Kabbara</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Haitao</namePart>
<namePart type="family">Lin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Amandalynne</namePart>
<namePart type="family">Paullada</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jannis</namePart>
<namePart type="family">Vamvas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The neural hidden Markov model has been proposed as an alternative to attention mechanism in machine translation with recurrent neural networks. However, since the introduction of the transformer models, its performance has been surpassed. This work proposes to introduce the concept of the hidden Markov model to the transformer architecture, which outperforms the transformer baseline. Interestingly, we find that the zero-order model already provides promising performance, giving it an edge compared to a model with first-order dependency, which performs similarly but is significantly slower in training and decoding.</abstract>
<identifier type="citekey">wang-etal-2021-transformer</identifier>
<identifier type="doi">10.18653/v1/2021.acl-srw.3</identifier>
<location>
<url>https://aclanthology.org/2021.acl-srw.3</url>
</location>
<part>
<date>2021-08</date>
<extent unit="page">
<start>23</start>
<end>32</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Transformer-Based Direct Hidden Markov Model for Machine Translation
%A Wang, Weiyue
%A Yang, Zijian
%A Gao, Yingbo
%A Ney, Hermann
%Y Kabbara, Jad
%Y Lin, Haitao
%Y Paullada, Amandalynne
%Y Vamvas, Jannis
%S Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing: Student Research Workshop
%D 2021
%8 August
%I Association for Computational Linguistics
%C Online
%F wang-etal-2021-transformer
%X The neural hidden Markov model has been proposed as an alternative to the attention mechanism in machine translation with recurrent neural networks. However, since the introduction of transformer models, its performance has been surpassed. This work introduces the concept of the hidden Markov model into the transformer architecture, where it outperforms the transformer baseline. Interestingly, we find that the zero-order model already provides promising performance, giving it an edge over a model with first-order dependency, which performs similarly but is significantly slower in training and decoding.
%R 10.18653/v1/2021.acl-srw.3
%U https://aclanthology.org/2021.acl-srw.3
%U https://doi.org/10.18653/v1/2021.acl-srw.3
%P 23-32
Markdown (Informal):
[Transformer-Based Direct Hidden Markov Model for Machine Translation](https://aclanthology.org/2021.acl-srw.3) (Wang et al., ACL-IJCNLP 2021)

ACL:
Weiyue Wang, Zijian Yang, Yingbo Gao, and Hermann Ney. 2021. Transformer-Based Direct Hidden Markov Model for Machine Translation. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing: Student Research Workshop, pages 23–32, Online. Association for Computational Linguistics.