BibTeX
@inproceedings{edunov-etal-2018-classical,
    title = "Classical Structured Prediction Losses for Sequence to Sequence Learning",
    author = "Edunov, Sergey and
      Ott, Myle and
      Auli, Michael and
      Grangier, David and
      Ranzato, Marc{'}Aurelio",
    editor = "Walker, Marilyn and
      Ji, Heng and
      Stent, Amanda",
    booktitle = "Proceedings of the 2018 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers)",
    month = jun,
    year = "2018",
    address = "New Orleans, Louisiana",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/N18-1033/",
    doi = "10.18653/v1/N18-1033",
    pages = "355--364",
    abstract = "There has been much recent work on training neural attention models at the sequence-level using either reinforcement learning-style methods or by optimizing the beam. In this paper, we survey a range of classical objective functions that have been widely used to train linear models for structured prediction and apply them to neural sequence to sequence models. Our experiments show that these losses can perform surprisingly well by slightly outperforming beam search optimization in a like for like setup. We also report new state of the art results on both IWSLT{'}14 German-English translation as well as Gigaword abstractive summarization. On the large WMT{'}14 English-French task, sequence-level training achieves 41.5 BLEU which is on par with the state of the art."
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="edunov-etal-2018-classical">
    <titleInfo>
        <title>Classical Structured Prediction Losses for Sequence to Sequence Learning</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Sergey</namePart>
        <namePart type="family">Edunov</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Myle</namePart>
        <namePart type="family">Ott</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Michael</namePart>
        <namePart type="family">Auli</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">David</namePart>
        <namePart type="family">Grangier</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Marc’Aurelio</namePart>
        <namePart type="family">Ranzato</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2018-06</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers)</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Marilyn</namePart>
            <namePart type="family">Walker</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Heng</namePart>
            <namePart type="family">Ji</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Amanda</namePart>
            <namePart type="family">Stent</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">New Orleans, Louisiana</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>There has been much recent work on training neural attention models at the sequence-level using either reinforcement learning-style methods or by optimizing the beam. In this paper, we survey a range of classical objective functions that have been widely used to train linear models for structured prediction and apply them to neural sequence to sequence models. Our experiments show that these losses can perform surprisingly well by slightly outperforming beam search optimization in a like for like setup. We also report new state of the art results on both IWSLT’14 German-English translation as well as Gigaword abstractive summarization. On the large WMT’14 English-French task, sequence-level training achieves 41.5 BLEU which is on par with the state of the art.</abstract>
    <identifier type="citekey">edunov-etal-2018-classical</identifier>
    <identifier type="doi">10.18653/v1/N18-1033</identifier>
    <location>
        <url>https://aclanthology.org/N18-1033/</url>
    </location>
    <part>
        <date>2018-06</date>
        <extent unit="page">
            <start>355</start>
            <end>364</end>
        </extent>
    </part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Classical Structured Prediction Losses for Sequence to Sequence Learning
%A Edunov, Sergey
%A Ott, Myle
%A Auli, Michael
%A Grangier, David
%A Ranzato, Marc’Aurelio
%Y Walker, Marilyn
%Y Ji, Heng
%Y Stent, Amanda
%S Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers)
%D 2018
%8 June
%I Association for Computational Linguistics
%C New Orleans, Louisiana
%F edunov-etal-2018-classical
%X There has been much recent work on training neural attention models at the sequence-level using either reinforcement learning-style methods or by optimizing the beam. In this paper, we survey a range of classical objective functions that have been widely used to train linear models for structured prediction and apply them to neural sequence to sequence models. Our experiments show that these losses can perform surprisingly well by slightly outperforming beam search optimization in a like for like setup. We also report new state of the art results on both IWSLT’14 German-English translation as well as Gigaword abstractive summarization. On the large WMT’14 English-French task, sequence-level training achieves 41.5 BLEU which is on par with the state of the art.
%R 10.18653/v1/N18-1033
%U https://aclanthology.org/N18-1033/
%U https://doi.org/10.18653/v1/N18-1033
%P 355-364
Markdown (Informal)
[Classical Structured Prediction Losses for Sequence to Sequence Learning](https://aclanthology.org/N18-1033/) (Edunov et al., NAACL 2018)
ACL
Sergey Edunov, Myle Ott, Michael Auli, David Grangier, and Marc’Aurelio Ranzato. 2018. Classical Structured Prediction Losses for Sequence to Sequence Learning. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 355–364, New Orleans, Louisiana. Association for Computational Linguistics.