BibTeX
@inproceedings{peyrard-etal-2017-learning,
title = "Learning to Score System Summaries for Better Content Selection Evaluation.",
author = "Peyrard, Maxime and
Botschen, Teresa and
Gurevych, Iryna",
editor = "Wang, Lu and
Cheung, Jackie Chi Kit and
Carenini, Giuseppe and
Liu, Fei",
booktitle = "Proceedings of the Workshop on New Frontiers in Summarization",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4510",
doi = "10.18653/v1/W17-4510",
pages = "74--84",
abstract = "The evaluation of summaries is a challenging but crucial task of the summarization field. In this work, we propose to learn an automatic scoring metric based on the human judgements available as part of classical summarization datasets like TAC-2008 and TAC-2009. Any existing automatic scoring metrics can be included as features, the model learns the combination exhibiting the best correlation with human judgments. The reliability of the new metric is tested in a further manual evaluation where we ask humans to evaluate summaries covering the whole scoring spectrum of the metric. We release the trained metric as an open-source tool.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="peyrard-etal-2017-learning">
<titleInfo>
<title>Learning to Score System Summaries for Better Content Selection Evaluation.</title>
</titleInfo>
<name type="personal">
<namePart type="given">Maxime</namePart>
<namePart type="family">Peyrard</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Teresa</namePart>
<namePart type="family">Botschen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Iryna</namePart>
<namePart type="family">Gurevych</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2017-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Workshop on New Frontiers in Summarization</title>
</titleInfo>
<name type="personal">
<namePart type="given">Lu</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jackie</namePart>
<namePart type="given">Chi</namePart>
<namePart type="given">Kit</namePart>
<namePart type="family">Cheung</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Giuseppe</namePart>
<namePart type="family">Carenini</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Fei</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Copenhagen, Denmark</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The evaluation of summaries is a challenging but crucial task of the summarization field. In this work, we propose to learn an automatic scoring metric based on the human judgements available as part of classical summarization datasets like TAC-2008 and TAC-2009. Any existing automatic scoring metrics can be included as features, the model learns the combination exhibiting the best correlation with human judgments. The reliability of the new metric is tested in a further manual evaluation where we ask humans to evaluate summaries covering the whole scoring spectrum of the metric. We release the trained metric as an open-source tool.</abstract>
<identifier type="citekey">peyrard-etal-2017-learning</identifier>
<identifier type="doi">10.18653/v1/W17-4510</identifier>
<location>
<url>https://aclanthology.org/W17-4510</url>
</location>
<part>
<date>2017-09</date>
<extent unit="page">
<start>74</start>
<end>84</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Learning to Score System Summaries for Better Content Selection Evaluation.
%A Peyrard, Maxime
%A Botschen, Teresa
%A Gurevych, Iryna
%Y Wang, Lu
%Y Cheung, Jackie Chi Kit
%Y Carenini, Giuseppe
%Y Liu, Fei
%S Proceedings of the Workshop on New Frontiers in Summarization
%D 2017
%8 September
%I Association for Computational Linguistics
%C Copenhagen, Denmark
%F peyrard-etal-2017-learning
%X The evaluation of summaries is a challenging but crucial task in the summarization field. In this work, we propose to learn an automatic scoring metric based on the human judgments available as part of classical summarization datasets like TAC-2008 and TAC-2009. Any existing automatic scoring metric can be included as a feature; the model learns the combination exhibiting the best correlation with human judgments. The reliability of the new metric is tested in a further manual evaluation where we ask humans to evaluate summaries covering the whole scoring spectrum of the metric. We release the trained metric as an open-source tool.
%R 10.18653/v1/W17-4510
%U https://aclanthology.org/W17-4510
%U https://doi.org/10.18653/v1/W17-4510
%P 74-84
Markdown (Informal)
[Learning to Score System Summaries for Better Content Selection Evaluation.](https://aclanthology.org/W17-4510) (Peyrard et al., 2017)
ACL
Maxime Peyrard, Teresa Botschen, and Iryna Gurevych. 2017. Learning to Score System Summaries for Better Content Selection Evaluation. In Proceedings of the Workshop on New Frontiers in Summarization, pages 74–84, Copenhagen, Denmark. Association for Computational Linguistics.
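
The abstract above describes learning a combination of existing metric scores that correlates best with human judgments. A minimal sketch of that general idea follows; the feature metrics, data arrays, and linear model here are illustrative assumptions, not the paper's actual implementation or released tool.

```python
# Sketch: treat scores from existing automatic metrics as features and
# learn the combination that best correlates with human judgments.
import numpy as np
from scipy.stats import pearsonr
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)

# Hypothetical feature matrix: 100 summaries x 3 automatic metric scores
# (e.g., different ROUGE variants). Real usage would compute these per summary.
X = rng.random((100, 3))

# Mock human judgment scores (stand-ins for TAC-2008/2009 annotations).
y = X @ np.array([0.5, 0.3, 0.2]) + rng.normal(0.0, 0.05, size=100)

# Learn the combination of metric scores that fits the human judgments.
model = LinearRegression().fit(X, y)
combined = model.predict(X)

# Check how well the learned metric correlates with the human judgments.
r, _ = pearsonr(combined, y)
print(f"Pearson correlation with human judgments: {r:.3f}")
```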