@inproceedings{shaib-etal-2024-much,
title = "How Much Annotation is Needed to Compare Summarization Models?",
author = "Shaib, Chantal and
Barrow, Joe and
Siu, Alexa and
Wallace, Byron and
Nenkova, Ani",
editor = "Blodgett, Su Lin and
Cercas Curry, Amanda and
Dev, Sunipa and
Madaio, Michael and
Nenkova, Ani and
Yang, Diyi and
Xiao, Ziang",
booktitle = "Proceedings of the Third Workshop on Bridging Human--Computer Interaction and Natural Language Processing",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.hcinlp-1.5",
doi = "10.18653/v1/2024.hcinlp-1.5",
pages = "51--59",
abstract = "Modern instruction-tuned models have become highly capable in text generation tasks such as summarization, and are expected to be released at a steady pace. In practice one may now wish to choose confidently, but with minimal effort, the best performing summarization model when applied to a new domain or purpose. In this work, we empirically investigate the test sample size necessary to select a preferred model in the context of news summarization. Empirical results reveal that comparative evaluation converges quickly for both automatic and human evaluation, with clear preferences for a system emerging from under 100 examples. The human preference data allows us to quantify how well automatic scores can reproduce preference rankings across a variety of downstream summarization tasks. We find that, while automatic metrics are stable at smaller sample sizes, only some automatic metrics are able to moderately predict model win rates according to human preference.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="shaib-etal-2024-much">
<titleInfo>
<title>How Much Annotation is Needed to Compare Summarization Models?</title>
</titleInfo>
<name type="personal">
<namePart type="given">Chantal</namePart>
<namePart type="family">Shaib</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Joe</namePart>
<namePart type="family">Barrow</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alexa</namePart>
<namePart type="family">Siu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Byron</namePart>
<namePart type="family">Wallace</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ani</namePart>
<namePart type="family">Nenkova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Third Workshop on Bridging Human–Computer Interaction and Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Su</namePart>
<namePart type="given">Lin</namePart>
<namePart type="family">Blodgett</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Amanda</namePart>
<namePart type="family">Cercas Curry</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sunipa</namePart>
<namePart type="family">Dev</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Michael</namePart>
<namePart type="family">Madaio</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ani</namePart>
<namePart type="family">Nenkova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Diyi</namePart>
<namePart type="family">Yang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ziang</namePart>
<namePart type="family">Xiao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Mexico City, Mexico</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Modern instruction-tuned models have become highly capable in text generation tasks such as summarization, and are expected to be released at a steady pace. In practice one may now wish to choose confidently, but with minimal effort, the best performing summarization model when applied to a new domain or purpose. In this work, we empirically investigate the test sample size necessary to select a preferred model in the context of news summarization. Empirical results reveal that comparative evaluation converges quickly for both automatic and human evaluation, with clear preferences for a system emerging from under 100 examples. The human preference data allows us to quantify how well automatic scores can reproduce preference rankings across a variety of downstream summarization tasks. We find that, while automatic metrics are stable at smaller sample sizes, only some automatic metrics are able to moderately predict model win rates according to human preference.</abstract>
<identifier type="citekey">shaib-etal-2024-much</identifier>
<identifier type="doi">10.18653/v1/2024.hcinlp-1.5</identifier>
<location>
<url>https://aclanthology.org/2024.hcinlp-1.5</url>
</location>
<part>
<date>2024-06</date>
<extent unit="page">
<start>51</start>
<end>59</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T How Much Annotation is Needed to Compare Summarization Models?
%A Shaib, Chantal
%A Barrow, Joe
%A Siu, Alexa
%A Wallace, Byron
%A Nenkova, Ani
%Y Blodgett, Su Lin
%Y Cercas Curry, Amanda
%Y Dev, Sunipa
%Y Madaio, Michael
%Y Nenkova, Ani
%Y Yang, Diyi
%Y Xiao, Ziang
%S Proceedings of the Third Workshop on Bridging Human–Computer Interaction and Natural Language Processing
%D 2024
%8 June
%I Association for Computational Linguistics
%C Mexico City, Mexico
%F shaib-etal-2024-much
%X Modern instruction-tuned models have become highly capable in text generation tasks such as summarization, and are expected to be released at a steady pace. In practice one may now wish to choose confidently, but with minimal effort, the best performing summarization model when applied to a new domain or purpose. In this work, we empirically investigate the test sample size necessary to select a preferred model in the context of news summarization. Empirical results reveal that comparative evaluation converges quickly for both automatic and human evaluation, with clear preferences for a system emerging from under 100 examples. The human preference data allows us to quantify how well automatic scores can reproduce preference rankings across a variety of downstream summarization tasks. We find that, while automatic metrics are stable at smaller sample sizes, only some automatic metrics are able to moderately predict model win rates according to human preference.
%R 10.18653/v1/2024.hcinlp-1.5
%U https://aclanthology.org/2024.hcinlp-1.5
%U https://doi.org/10.18653/v1/2024.hcinlp-1.5
%P 51-59
Markdown (Informal)
[How Much Annotation is Needed to Compare Summarization Models?](https://aclanthology.org/2024.hcinlp-1.5) (Shaib et al., HCINLP-WS 2024)
ACL
- Chantal Shaib, Joe Barrow, Alexa Siu, Byron Wallace, and Ani Nenkova. 2024. How Much Annotation is Needed to Compare Summarization Models? In Proceedings of the Third Workshop on Bridging Human–Computer Interaction and Natural Language Processing, pages 51–59, Mexico City, Mexico. Association for Computational Linguistics.
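
The abstract above describes estimating how many annotated test examples are needed before one summarization system clearly wins a pairwise comparison, and how well automatic metrics track human win rates. As a minimal, hedged sketch of that kind of analysis (not the authors' code, and using entirely synthetic preference labels), the snippet below subsamples hypothetical pairwise judgments and shows how the estimated win rate stabilizes as the annotation budget grows.

```python
# Illustrative sketch only: how a pairwise win-rate estimate stabilizes as
# more annotated comparisons are added. The labels are synthetic and are
# NOT data from Shaib et al. (2024).
import random

random.seed(0)

# Hypothetical pairwise judgments over 500 documents:
# 1 if model A's summary was preferred over model B's, 0 otherwise.
preferences = [1 if random.random() < 0.65 else 0 for _ in range(500)]


def win_rate(labels):
    """Fraction of comparisons won by model A."""
    return sum(labels) / len(labels)


# Track the estimate at increasing sample sizes, mimicking the question
# "how many annotations before the preferred model is clear?"
for n in (10, 25, 50, 100, 250, 500):
    print(f"n={n:4d}  estimated win rate for A: {win_rate(preferences[:n]):.2f}")
```

In the paper's actual experiments the judgments come from human annotators and from automatic metrics on news summaries; the abstract's finding is that such comparisons settle on a clear system preference with fewer than 100 examples.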