% NOTE(review): this entry has no author field (the "nn" citekey prefix suggests
% the ACL Anthology record itself is authorless) -- confirm against the published
% paper (https://aclanthology.org/W19-8908) before adding authors.
@inproceedings{nn-2019-easy,
  title     = {{EASY}-{M}: Evaluation System for Multilingual Summarizers},
  editor    = {Giannakopoulos, George},
  booktitle = {Proceedings of the Workshop {MultiLing} 2019: Summarization Across Languages, Genres and Sources},
  month     = sep,
  year      = {2019},
  address   = {Varna, Bulgaria},
  publisher = {INCOMA Ltd.},
  url       = {https://aclanthology.org/W19-8908},
  doi       = {10.26615/978-954-452-058-8_008},
  pages     = {53--62},
  abstract  = {Automatic text summarization aims at producing a shorter version of a document (or a document set). Evaluation of summarization quality is a challenging task. Because human evaluations are expensive and evaluators often disagree between themselves, many researchers prefer to evaluate their systems automatically, with help of software tools. Such a tool usually requires a point of reference in the form of one or more human-written summaries for each text in the corpus. Then, a system-generated summary is compared to one or more human-written summaries, according to selected metrics. However, a single metric cannot reflect all quality-related aspects of a summary. In this paper we present the EvAluation SYstem for Multilingual Summarization (EASY-M), which enables the evaluation of system-generated summaries in 17 different languages with several quality measures, based on comparison with their human-generated counterparts. The system also provides comparative results with two built-in baselines. The source code and both online and offline versions of EASY-M is freely available for the NLP community.},
}
<?xml version="1.0" encoding="UTF-8"?>
<!-- MODS v3 bibliographic record for ACL Anthology paper W19-8908
     (citekey nn-2019-easy). Duplicates the BibTeX entry above in
     Library of Congress MODS format. -->
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="nn-2019-easy">
<titleInfo>
<title>EASY-M: Evaluation System for Multilingual Summarizers</title>
</titleInfo>
<originInfo>
<dateIssued>2019-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<!-- Host item: the MultiLing 2019 workshop proceedings (editor, publisher,
     and place of publication are recorded here, not on the paper itself).
     NOTE(review): no personal name with an author role is present in this
     record; the only name listed is the proceedings editor. -->
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Workshop MultiLing 2019: Summarization Across Languages, Genres and Sources</title>
</titleInfo>
<name type="personal">
<namePart type="given">George</namePart>
<namePart type="family">Giannakopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>INCOMA Ltd.</publisher>
<place>
<placeTerm type="text">Varna, Bulgaria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Automatic text summarization aims at producing a shorter version of a document (or a document set). Evaluation of summarization quality is a challenging task. Because human evaluations are expensive and evaluators often disagree between themselves, many researchers prefer to evaluate their systems automatically, with help of software tools. Such a tool usually requires a point of reference in the form of one or more human-written summaries for each text in the corpus. Then, a system-generated summary is compared to one or more human-written summaries, according to selected metrics. However, a single metric cannot reflect all quality-related aspects of a summary. In this paper we present the EvAluation SYstem for Multilingual Summarization (EASY-M), which enables the evaluation of system-generated summaries in 17 different languages with several quality measures, based on comparison with their human-generated counterparts. The system also provides comparative results with two built-in baselines. The source code and both online and offline versions of EASY-M is freely available for the NLP community.</abstract>
<identifier type="citekey">nn-2019-easy</identifier>
<identifier type="doi">10.26615/978-954-452-058-8_008</identifier>
<location>
<url>https://aclanthology.org/W19-8908</url>
</location>
<!-- Issue date and page extent within the host proceedings. -->
<part>
<date>2019-09</date>
<extent unit="page">
<start>53</start>
<end>62</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T EASY-M: Evaluation System for Multilingual Summarizers
%Y Giannakopoulos, George
%S Proceedings of the Workshop MultiLing 2019: Summarization Across Languages, Genres and Sources
%D 2019
%8 September
%I INCOMA Ltd.
%C Varna, Bulgaria
%F nn-2019-easy
%X Automatic text summarization aims at producing a shorter version of a document (or a document set). Evaluation of summarization quality is a challenging task. Because human evaluations are expensive and evaluators often disagree between themselves, many researchers prefer to evaluate their systems automatically, with help of software tools. Such a tool usually requires a point of reference in the form of one or more human-written summaries for each text in the corpus. Then, a system-generated summary is compared to one or more human-written summaries, according to selected metrics. However, a single metric cannot reflect all quality-related aspects of a summary. In this paper we present the EvAluation SYstem for Multilingual Summarization (EASY-M), which enables the evaluation of system-generated summaries in 17 different languages with several quality measures, based on comparison with their human-generated counterparts. The system also provides comparative results with two built-in baselines. The source code and both online and offline versions of EASY-M is freely available for the NLP community.
%R 10.26615/978-954-452-058-8_008
%U https://aclanthology.org/W19-8908
%U https://doi.org/10.26615/978-954-452-058-8_008
%P 53-62
Markdown (Informal)
[EASY-M: Evaluation System for Multilingual Summarizers](https://aclanthology.org/W19-8908) (Giannakopoulos, RANLP 2019)
ACL