@inproceedings{iskender-etal-2021-reliability,
    title = "Reliability of Human Evaluation for Text Summarization: Lessons Learned and Challenges Ahead",
    author = {Iskender, Neslihan and
      Polzehl, Tim and
      M{\"o}ller, Sebastian},
    editor = "Belz, Anya and
      Agarwal, Shubham and
      Graham, Yvette and
      Reiter, Ehud and
      Shimorina, Anastasia",
    booktitle = "Proceedings of the Workshop on Human Evaluation of NLP Systems (HumEval)",
    month = apr,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.humeval-1.10",
    pages = "86--96",
    abstract = "Only a small portion of research papers with human evaluation for text summarization provide information about the participant demographics, task design, and experiment protocol. Additionally, many researchers use human evaluation as a gold standard without questioning its reliability or investigating the factors that might affect it. As a result, there is a lack of best practices for reliable human summarization evaluation grounded in empirical evidence. To investigate the reliability of human evaluation, we conduct a series of human evaluation experiments, provide an overview of participant demographics, task design, and experimental set-up, and compare the results across experiments. Based on our empirical analysis, we provide guidelines to ensure the reliability of expert and non-expert evaluations, and we identify the factors that might affect this reliability.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="iskender-etal-2021-reliability">
    <titleInfo>
      <title>Reliability of Human Evaluation for Text Summarization: Lessons Learned and Challenges Ahead</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Neslihan</namePart>
      <namePart type="family">Iskender</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Tim</namePart>
      <namePart type="family">Polzehl</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Sebastian</namePart>
      <namePart type="family">Möller</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-04</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Workshop on Human Evaluation of NLP Systems (HumEval)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Anya</namePart>
        <namePart type="family">Belz</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Shubham</namePart>
        <namePart type="family">Agarwal</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yvette</namePart>
        <namePart type="family">Graham</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Ehud</namePart>
        <namePart type="family">Reiter</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Anastasia</namePart>
        <namePart type="family">Shimorina</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Only a small portion of research papers with human evaluation for text summarization provide information about the participant demographics, task design, and experiment protocol. Additionally, many researchers use human evaluation as a gold standard without questioning its reliability or investigating the factors that might affect it. As a result, there is a lack of best practices for reliable human summarization evaluation grounded in empirical evidence. To investigate the reliability of human evaluation, we conduct a series of human evaluation experiments, provide an overview of participant demographics, task design, and experimental set-up, and compare the results across experiments. Based on our empirical analysis, we provide guidelines to ensure the reliability of expert and non-expert evaluations, and we identify the factors that might affect this reliability.</abstract>
    <identifier type="citekey">iskender-etal-2021-reliability</identifier>
    <location>
      <url>https://aclanthology.org/2021.humeval-1.10</url>
    </location>
    <part>
      <date>2021-04</date>
      <extent unit="page">
        <start>86</start>
        <end>96</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Reliability of Human Evaluation for Text Summarization: Lessons Learned and Challenges Ahead
%A Iskender, Neslihan
%A Polzehl, Tim
%A Möller, Sebastian
%Y Belz, Anya
%Y Agarwal, Shubham
%Y Graham, Yvette
%Y Reiter, Ehud
%Y Shimorina, Anastasia
%S Proceedings of the Workshop on Human Evaluation of NLP Systems (HumEval)
%D 2021
%8 April
%I Association for Computational Linguistics
%C Online
%F iskender-etal-2021-reliability
%X Only a small portion of research papers with human evaluation for text summarization provide information about the participant demographics, task design, and experiment protocol. Additionally, many researchers use human evaluation as a gold standard without questioning its reliability or investigating the factors that might affect it. As a result, there is a lack of best practices for reliable human summarization evaluation grounded in empirical evidence. To investigate the reliability of human evaluation, we conduct a series of human evaluation experiments, provide an overview of participant demographics, task design, and experimental set-up, and compare the results across experiments. Based on our empirical analysis, we provide guidelines to ensure the reliability of expert and non-expert evaluations, and we identify the factors that might affect this reliability.
%U https://aclanthology.org/2021.humeval-1.10
%P 86-96
Markdown (Informal)
[Reliability of Human Evaluation for Text Summarization: Lessons Learned and Challenges Ahead](https://aclanthology.org/2021.humeval-1.10) (Iskender et al., HumEval 2021)
ACL
Neslihan Iskender, Tim Polzehl, and Sebastian Möller. 2021. Reliability of Human Evaluation for Text Summarization: Lessons Learned and Challenges Ahead. In Proceedings of the Workshop on Human Evaluation of NLP Systems (HumEval), pages 86–96, Online. Association for Computational Linguistics.