@inproceedings{chang-etal-2021-order,
title = "Does the Order of Training Samples Matter? Improving Neural Data-to-Text Generation with Curriculum Learning",
author = "Chang, Ernie and
Yeh, Hui-Syuan and
Demberg, Vera",
editor = "Merlo, Paola and
Tiedemann, J{\"o}rg and
Tsarfaty, Reut",
booktitle = "Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume",
month = apr,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.eacl-main.61",
doi = "10.18653/v1/2021.eacl-main.61",
pages = "727--733",
abstract = "Recent advancements in data-to-text generation largely take on the form of neural end-to-end systems. Efforts have been dedicated to improving text generation systems by changing the order of training samples in a process known as curriculum learning. Past research on sequence-to-sequence learning showed that curriculum learning helps to improve both the performance and convergence speed. In this work, we delve into the same idea surrounding the training samples consisting of structured data and text pairs, where at each update, the curriculum framework selects training samples based on the model{'}s competence. Specifically, we experiment with various difficulty metrics and put forward a soft edit distance metric for ranking training samples. On our benchmarks, it shows faster convergence speed where training time is reduced by 38.7{\%} and performance is boosted by 4.84 BLEU.",
}
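
The abstract describes a competence-based curriculum: at each update, training samples are drawn only from the subset whose difficulty falls within the model's current competence. Below is a minimal sketch of that selection loop, assuming the square-root competence schedule common in prior competence-based curriculum work and a plain token-level edit distance as a stand-in for the paper's soft variant; the function names (`competence`, `build_curriculum`, `sample_batch`) are illustrative, not from the paper's code.

```python
import math
import random

def edit_distance(a, b):
    """Token-level Levenshtein distance between two token sequences."""
    m, n = len(a), len(b)
    dp = list(range(n + 1))          # dp[j] = distance(a[:0], b[:j])
    for i in range(1, m + 1):
        prev, dp[0] = dp[0], i       # prev holds dp[i-1][j-1]
        for j in range(1, n + 1):
            cur = dp[j]
            dp[j] = min(dp[j] + 1,                      # delete a[i-1]
                        dp[j - 1] + 1,                  # insert b[j-1]
                        prev + (a[i - 1] != b[j - 1]))  # (mis)match
            prev = cur
    return dp[n]

def competence(step, total_steps, c0=0.1):
    """Square-root competence schedule rising from c0 to 1 over training
    (an assumption; the paper may use a different schedule)."""
    return min(1.0, math.sqrt(step * (1 - c0 ** 2) / total_steps + c0 ** 2))

def build_curriculum(pairs):
    """Sort (data_tokens, text_tokens) pairs by difficulty and attach
    rank-normalized difficulties in (0, 1]."""
    ranked = sorted(pairs, key=lambda p: edit_distance(p[0], p[1]))
    n = len(ranked)
    return [(pair, (i + 1) / n) for i, pair in enumerate(ranked)]

def sample_batch(curriculum, step, total_steps, batch_size):
    """Draw a batch uniformly from samples the model is competent to see."""
    c = competence(step, total_steps)
    eligible = [pair for pair, diff in curriculum if diff <= c]
    return random.sample(eligible, min(batch_size, len(eligible)))
```

Because difficulties are rank-normalized to (0, 1], the competence value directly bounds the fraction of the sorted training set eligible for sampling at any step.
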
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="chang-etal-2021-order">
<titleInfo>
<title>Does the Order of Training Samples Matter? Improving Neural Data-to-Text Generation with Curriculum Learning</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ernie</namePart>
<namePart type="family">Chang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hui-Syuan</namePart>
<namePart type="family">Yeh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vera</namePart>
<namePart type="family">Demberg</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-04</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume</title>
</titleInfo>
<name type="personal">
<namePart type="given">Paola</namePart>
<namePart type="family">Merlo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jorg</namePart>
<namePart type="family">Tiedemann</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Reut</namePart>
<namePart type="family">Tsarfaty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Recent advancements in data-to-text generation largely take on the form of neural end-to-end systems. Efforts have been dedicated to improving text generation systems by changing the order of training samples in a process known as curriculum learning. Past research on sequence-to-sequence learning showed that curriculum learning helps to improve both the performance and convergence speed. In this work, we delve into the same idea surrounding the training samples consisting of structured data and text pairs, where at each update, the curriculum framework selects training samples based on the model’s competence. Specifically, we experiment with various difficulty metrics and put forward a soft edit distance metric for ranking training samples. On our benchmarks, it shows faster convergence speed where training time is reduced by 38.7% and performance is boosted by 4.84 BLEU.</abstract>
<identifier type="citekey">chang-etal-2021-order</identifier>
<identifier type="doi">10.18653/v1/2021.eacl-main.61</identifier>
<location>
<url>https://aclanthology.org/2021.eacl-main.61</url>
</location>
<part>
<date>2021-04</date>
<extent unit="page">
<start>727</start>
<end>733</end>
</extent>
</part>
</mods>
</modsCollection>
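
The difficulty metric the paper puts forward is a soft edit distance between a pair's linearized data and its text. The exact formulation is not reproduced in this record; the sketch below assumes one common relaxation, replacing the unit substitution cost with a fractional cost based on character-level token similarity, so near-matching tokens are cheaper to align than unrelated ones.

```python
from difflib import SequenceMatcher

def soft_edit_distance(a, b):
    """Edit distance where substituting similar tokens costs less than 1.

    The fractional substitution cost (1 - character-level similarity) is an
    assumed relaxation, not necessarily the paper's exact formulation.
    """
    m, n = len(a), len(b)
    dp = [[0.0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        dp[i][0] = float(i)
    for j in range(n + 1):
        dp[0][j] = float(j)
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            sim = SequenceMatcher(None, a[i - 1], b[j - 1]).ratio()
            dp[i][j] = min(dp[i - 1][j] + 1,              # deletion
                           dp[i][j - 1] + 1,              # insertion
                           dp[i - 1][j - 1] + (1 - sim))  # soft substitution
    return dp[m][n]
```

Lower scores mark pairs whose text closely mirrors the linearized data; rank-normalizing these scores plugs directly into the `build_curriculum` helper sketched above.
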
%0 Conference Proceedings
%T Does the Order of Training Samples Matter? Improving Neural Data-to-Text Generation with Curriculum Learning
%A Chang, Ernie
%A Yeh, Hui-Syuan
%A Demberg, Vera
%Y Merlo, Paola
%Y Tiedemann, Jörg
%Y Tsarfaty, Reut
%S Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume
%D 2021
%8 April
%I Association for Computational Linguistics
%C Online
%F chang-etal-2021-order
%X Recent advancements in data-to-text generation largely take on the form of neural end-to-end systems. Efforts have been dedicated to improving text generation systems by changing the order of training samples in a process known as curriculum learning. Past research on sequence-to-sequence learning showed that curriculum learning helps to improve both the performance and convergence speed. In this work, we delve into the same idea surrounding the training samples consisting of structured data and text pairs, where at each update, the curriculum framework selects training samples based on the model’s competence. Specifically, we experiment with various difficulty metrics and put forward a soft edit distance metric for ranking training samples. On our benchmarks, it shows faster convergence speed where training time is reduced by 38.7% and performance is boosted by 4.84 BLEU.
%R 10.18653/v1/2021.eacl-main.61
%U https://aclanthology.org/2021.eacl-main.61
%U https://doi.org/10.18653/v1/2021.eacl-main.61
%P 727-733
Markdown (Informal)
[Does the Order of Training Samples Matter? Improving Neural Data-to-Text Generation with Curriculum Learning](https://aclanthology.org/2021.eacl-main.61) (Chang et al., EACL 2021)
ACL
Ernie Chang, Hui-Syuan Yeh, and Vera Demberg. 2021. [Does the Order of Training Samples Matter? Improving Neural Data-to-Text Generation with Curriculum Learning](https://aclanthology.org/2021.eacl-main.61). In *Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume*, pages 727–733, Online. Association for Computational Linguistics.