@inproceedings{bhandari-etal-2020-metrics,
title = "Metrics also Disagree in the Low Scoring Range: Revisiting Summarization Evaluation Metrics",
author = "Bhandari, Manik and
Gour, Pranav Narayan and
Ashfaq, Atabak and
Liu, Pengfei",
editor = "Scott, Donia and
Bel, Nuria and
Zong, Chengqing",
booktitle = "Proceedings of the 28th International Conference on Computational Linguistics",
month = dec,
year = "2020",
address = "Barcelona, Spain (Online)",
publisher = "International Committee on Computational Linguistics",
url = "https://aclanthology.org/2020.coling-main.501",
doi = "10.18653/v1/2020.coling-main.501",
pages = "5702--5711",
abstract = "In text summarization, evaluating the efficacy of automatic metrics without human judgments has become recently popular. One exemplar work (Peyrard, 2019) concludes that automatic metrics strongly disagree when ranking high-scoring summaries. In this paper, we revisit their experiments and find that their observations stem from the fact that metrics disagree in ranking summaries from any narrow scoring range. We hypothesize that this may be because summaries are similar to each other in a narrow scoring range and are thus, difficult to rank. Apart from the width of the scoring range of summaries, we analyze three other properties that impact inter-metric agreement - Ease of Summarization, Abstractiveness, and Coverage.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="bhandari-etal-2020-metrics">
<titleInfo>
<title>Metrics also Disagree in the Low Scoring Range: Revisiting Summarization Evaluation Metrics</title>
</titleInfo>
<name type="personal">
<namePart type="given">Manik</namePart>
<namePart type="family">Bhandari</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pranav</namePart>
<namePart type="given">Narayan</namePart>
<namePart type="family">Gour</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Atabak</namePart>
<namePart type="family">Ashfaq</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pengfei</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 28th International Conference on Computational Linguistics</title>
</titleInfo>
<name type="personal">
<namePart type="given">Donia</namePart>
<namePart type="family">Scott</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nuria</namePart>
<namePart type="family">Bel</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chengqing</namePart>
<namePart type="family">Zong</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>International Committee on Computational Linguistics</publisher>
<place>
<placeTerm type="text">Barcelona, Spain (Online)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In text summarization, evaluating the efficacy of automatic metrics without human judgments has recently become popular. One exemplar work (Peyrard, 2019) concludes that automatic metrics strongly disagree when ranking high-scoring summaries. In this paper, we revisit their experiments and find that their observations stem from the fact that metrics disagree in ranking summaries from any narrow scoring range. We hypothesize that this may be because summaries are similar to each other in a narrow scoring range and are thus difficult to rank. Apart from the width of the scoring range of summaries, we analyze three other properties that impact inter-metric agreement: Ease of Summarization, Abstractiveness, and Coverage.</abstract>
<identifier type="citekey">bhandari-etal-2020-metrics</identifier>
<identifier type="doi">10.18653/v1/2020.coling-main.501</identifier>
<location>
<url>https://aclanthology.org/2020.coling-main.501</url>
</location>
<part>
<date>2020-12</date>
<extent unit="page">
<start>5702</start>
<end>5711</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Metrics also Disagree in the Low Scoring Range: Revisiting Summarization Evaluation Metrics
%A Bhandari, Manik
%A Gour, Pranav Narayan
%A Ashfaq, Atabak
%A Liu, Pengfei
%Y Scott, Donia
%Y Bel, Nuria
%Y Zong, Chengqing
%S Proceedings of the 28th International Conference on Computational Linguistics
%D 2020
%8 December
%I International Committee on Computational Linguistics
%C Barcelona, Spain (Online)
%F bhandari-etal-2020-metrics
%X In text summarization, evaluating the efficacy of automatic metrics without human judgments has recently become popular. One exemplar work (Peyrard, 2019) concludes that automatic metrics strongly disagree when ranking high-scoring summaries. In this paper, we revisit their experiments and find that their observations stem from the fact that metrics disagree in ranking summaries from any narrow scoring range. We hypothesize that this may be because summaries are similar to each other in a narrow scoring range and are thus difficult to rank. Apart from the width of the scoring range of summaries, we analyze three other properties that impact inter-metric agreement: Ease of Summarization, Abstractiveness, and Coverage.
%R 10.18653/v1/2020.coling-main.501
%U https://aclanthology.org/2020.coling-main.501
%U https://doi.org/10.18653/v1/2020.coling-main.501
%P 5702-5711
Markdown (Informal)
[Metrics also Disagree in the Low Scoring Range: Revisiting Summarization Evaluation Metrics](https://aclanthology.org/2020.coling-main.501) (Bhandari et al., COLING 2020)
ACL
Manik Bhandari, Pranav Narayan Gour, Atabak Ashfaq, and Pengfei Liu. 2020. [Metrics also Disagree in the Low Scoring Range: Revisiting Summarization Evaluation Metrics](https://aclanthology.org/2020.coling-main.501). In *Proceedings of the 28th International Conference on Computational Linguistics*, pages 5702–5711, Barcelona, Spain (Online). International Committee on Computational Linguistics.