@inproceedings{kumar-cheung-2019-understanding,
    title = "{U}nderstanding the {B}ehaviour of {N}eural {A}bstractive {S}ummarizers using {C}ontrastive {E}xamples",
    author = "Kumar, Krtin  and
      Cheung, Jackie Chi Kit",
    editor = "Burstein, Jill  and
      Doran, Christy  and
      Solorio, Thamar",
    booktitle = "Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)",
    month = jun,
    year = "2019",
    address = "Minneapolis, Minnesota",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/N19-1396/",
    doi = "10.18653/v1/N19-1396",
    pages = "3949--3954",
    abstract = "Neural abstractive summarizers generate summary texts using a language model conditioned on the input source text, and have recently achieved high ROUGE scores on benchmark summarization datasets. We investigate how they achieve this performance with respect to human-written gold-standard abstracts, and whether the systems are able to understand deeper syntactic and semantic structures. We generate a set of contrastive summaries which are perturbed, deficient versions of human-written summaries, and test whether existing neural summarizers score them more highly than the human-written summaries. We analyze their performance on different datasets and find that these systems fail to understand the source text, in a majority of the cases."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kumar-cheung-2019-understanding">
    <titleInfo>
        <title>Understanding the Behaviour of Neural Abstractive Summarizers using Contrastive Examples</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Krtin</namePart>
        <namePart type="family">Kumar</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Jackie</namePart>
        <namePart type="given">Chi</namePart>
        <namePart type="given">Kit</namePart>
        <namePart type="family">Cheung</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2019-06</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Jill</namePart>
            <namePart type="family">Burstein</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Christy</namePart>
            <namePart type="family">Doran</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Thamar</namePart>
            <namePart type="family">Solorio</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Minneapolis, Minnesota</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Neural abstractive summarizers generate summary texts using a language model conditioned on the input source text, and have recently achieved high ROUGE scores on benchmark summarization datasets. We investigate how they achieve this performance with respect to human-written gold-standard abstracts, and whether the systems are able to understand deeper syntactic and semantic structures. We generate a set of contrastive summaries which are perturbed, deficient versions of human-written summaries, and test whether existing neural summarizers score them more highly than the human-written summaries. We analyze their performance on different datasets and find that these systems fail to understand the source text, in a majority of the cases.</abstract>
    <identifier type="citekey">kumar-cheung-2019-understanding</identifier>
    <identifier type="doi">10.18653/v1/N19-1396</identifier>
    <location>
        <url>https://aclanthology.org/N19-1396/</url>
    </location>
    <part>
        <date>2019-06</date>
        <extent unit="page">
            <start>3949</start>
            <end>3954</end>
        </extent>
    </part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Understanding the Behaviour of Neural Abstractive Summarizers using Contrastive Examples
%A Kumar, Krtin
%A Cheung, Jackie Chi Kit
%Y Burstein, Jill
%Y Doran, Christy
%Y Solorio, Thamar
%S Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)
%D 2019
%8 June
%I Association for Computational Linguistics
%C Minneapolis, Minnesota
%F kumar-cheung-2019-understanding
%X Neural abstractive summarizers generate summary texts using a language model conditioned on the input source text, and have recently achieved high ROUGE scores on benchmark summarization datasets. We investigate how they achieve this performance with respect to human-written gold-standard abstracts, and whether the systems are able to understand deeper syntactic and semantic structures. We generate a set of contrastive summaries which are perturbed, deficient versions of human-written summaries, and test whether existing neural summarizers score them more highly than the human-written summaries. We analyze their performance on different datasets and find that these systems fail to understand the source text, in a majority of the cases.
%R 10.18653/v1/N19-1396
%U https://aclanthology.org/N19-1396/
%U https://doi.org/10.18653/v1/N19-1396
%P 3949-3954
Markdown (Informal)
[Understanding the Behaviour of Neural Abstractive Summarizers using Contrastive Examples](https://aclanthology.org/N19-1396/) (Kumar & Cheung, NAACL 2019)
ACL