@inproceedings{flachs-etal-2019-historical,
title = "Historical Text Normalization with Delayed Rewards",
author = "Flachs, Simon and
Bollmann, Marcel and
S{\o}gaard, Anders",
editor = "Korhonen, Anna and
Traum, David and
M{\`a}rquez, Llu{\'\i}s",
booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
month = jul,
year = "2019",
address = "Florence, Italy",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/P19-1157",
doi = "10.18653/v1/P19-1157",
pages = "1614--1619",
abstract = "Training neural sequence-to-sequence models with simple token-level log-likelihood is now a standard approach to historical text normalization, albeit often outperformed by phrase-based models. Policy gradient training enables direct optimization for exact matches, and while the small datasets in historical text normalization are prohibitive of from-scratch reinforcement learning, we show that policy gradient fine-tuning leads to significant improvements across the board. Policy gradient training, in particular, leads to more accurate normalizations for long or unseen words.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="flachs-etal-2019-historical">
    <titleInfo>
      <title>Historical Text Normalization with Delayed Rewards</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Simon</namePart>
      <namePart type="family">Flachs</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Marcel</namePart>
      <namePart type="family">Bollmann</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Anders</namePart>
      <namePart type="family">Søgaard</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2019-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Anna</namePart>
        <namePart type="family">Korhonen</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">David</namePart>
        <namePart type="family">Traum</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Lluís</namePart>
        <namePart type="family">Màrquez</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Florence, Italy</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Training neural sequence-to-sequence models with simple token-level log-likelihood is now a standard approach to historical text normalization, albeit often outperformed by phrase-based models. Policy gradient training enables direct optimization for exact matches, and while the small datasets in historical text normalization are prohibitive of from-scratch reinforcement learning, we show that policy gradient fine-tuning leads to significant improvements across the board. Policy gradient training, in particular, leads to more accurate normalizations for long or unseen words.</abstract>
    <identifier type="citekey">flachs-etal-2019-historical</identifier>
    <identifier type="doi">10.18653/v1/P19-1157</identifier>
    <location>
      <url>https://aclanthology.org/P19-1157</url>
    </location>
    <part>
      <date>2019-07</date>
      <extent unit="page">
        <start>1614</start>
        <end>1619</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Historical Text Normalization with Delayed Rewards
%A Flachs, Simon
%A Bollmann, Marcel
%A Søgaard, Anders
%Y Korhonen, Anna
%Y Traum, David
%Y Màrquez, Lluís
%S Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics
%D 2019
%8 July
%I Association for Computational Linguistics
%C Florence, Italy
%F flachs-etal-2019-historical
%X Training neural sequence-to-sequence models with simple token-level log-likelihood is now a standard approach to historical text normalization, albeit often outperformed by phrase-based models. Policy gradient training enables direct optimization for exact matches, and while the small datasets in historical text normalization are prohibitive of from-scratch reinforcement learning, we show that policy gradient fine-tuning leads to significant improvements across the board. Policy gradient training, in particular, leads to more accurate normalizations for long or unseen words.
%R 10.18653/v1/P19-1157
%U https://aclanthology.org/P19-1157
%U https://doi.org/10.18653/v1/P19-1157
%P 1614-1619
Markdown (Informal)
[Historical Text Normalization with Delayed Rewards](https://aclanthology.org/P19-1157) (Flachs et al., ACL 2019)
ACL
- Simon Flachs, Marcel Bollmann, and Anders Søgaard. 2019. Historical Text Normalization with Delayed Rewards. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1614–1619, Florence, Italy. Association for Computational Linguistics.
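The abstract describes fine-tuning a normalization model with a policy-gradient objective whose delayed reward is an exact match with the gold normalization. Below is a minimal, self-contained sketch of that core idea, not the authors' code: it uses a toy log-linear policy over three hypothetical candidate spellings and a plain REINFORCE update, rather than the paper's character-level sequence-to-sequence model; all names, candidates, and hyperparameters are illustrative assumptions.

```python
# Toy REINFORCE fine-tuning with a delayed exact-match reward.
# Hypothetical simplification of the approach summarized in the abstract.
import math
import random

random.seed(0)

# One historical word with a few hypothetical candidate normalizations.
candidates = ["year", "yeare", "yere"]
gold = "year"

# Log-linear policy: one logit per candidate; assume MLE pretraining
# happened to leave the wrong form slightly preferred.
logits = [0.0, 1.0, 0.5]
learning_rate = 0.5
baseline = 0.0  # running average reward, used to reduce gradient variance

def softmax(xs):
    m = max(xs)
    exps = [math.exp(x - m) for x in xs]
    z = sum(exps)
    return [e / z for e in exps]

for step in range(200):
    probs = softmax(logits)
    # Sample a normalization from the current policy.
    i = random.choices(range(len(candidates)), weights=probs)[0]
    # Delayed reward: 1 only if the sampled output exactly matches the gold form.
    reward = 1.0 if candidates[i] == gold else 0.0
    advantage = reward - baseline
    baseline = 0.9 * baseline + 0.1 * reward
    # REINFORCE update; for a softmax policy, d log p(i) / d logit_j = [j == i] - p_j.
    for j in range(len(logits)):
        grad_logp = (1.0 if j == i else 0.0) - probs[j]
        logits[j] += learning_rate * advantage * grad_logp

print("final policy:", {c: round(p, 3) for c, p in zip(candidates, softmax(logits))})
```

Running the sketch shifts almost all probability mass onto the gold form; the running baseline is one common variance-reduction choice for this kind of fine-tuning, though the paper should be consulted for the exact reward shaping and training setup used.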