@inproceedings{arumae-liu-2018-reinforced,
title = "Reinforced Extractive Summarization with Question-Focused Rewards",
author = "Arumae, Kristjan and
Liu, Fei",
editor = "Shwartz, Vered and
Tabassum, Jeniya and
Voigt, Rob and
Che, Wanxiang and
de Marneffe, Marie-Catherine and
Nissim, Malvina",
booktitle = "Proceedings of {ACL} 2018, Student Research Workshop",
month = jul,
year = "2018",
address = "Melbourne, Australia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/P18-3015",
doi = "10.18653/v1/P18-3015",
pages = "105--111",
abstract = "We investigate a new training paradigm for extractive summarization. Traditionally, human abstracts are used to derive goldstandard labels for extraction units. However, the labels are often inaccurate, because human abstracts and source documents cannot be easily aligned at the word level. In this paper we convert human abstracts to a set of Cloze-style comprehension questions. System summaries are encouraged to preserve salient source content useful for answering questions and share common words with the abstracts. We use reinforcement learning to explore the space of possible extractive summaries and introduce a question-focused reward function to promote concise, fluent, and informative summaries. Our experiments show that the proposed method is effective. It surpasses state-of-the-art systems on the standard summarization dataset.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="arumae-liu-2018-reinforced">
  <titleInfo>
    <title>Reinforced Extractive Summarization with Question-Focused Rewards</title>
  </titleInfo>
  <name type="personal">
    <namePart type="given">Kristjan</namePart>
    <namePart type="family">Arumae</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <name type="personal">
    <namePart type="given">Fei</namePart>
    <namePart type="family">Liu</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <originInfo>
    <dateIssued>2018-07</dateIssued>
  </originInfo>
  <typeOfResource>text</typeOfResource>
  <relatedItem type="host">
    <titleInfo>
      <title>Proceedings of ACL 2018, Student Research Workshop</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Vered</namePart>
      <namePart type="family">Shwartz</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jeniya</namePart>
      <namePart type="family">Tabassum</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Rob</namePart>
      <namePart type="family">Voigt</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Wanxiang</namePart>
      <namePart type="family">Che</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Marie-Catherine</namePart>
      <namePart type="family">de Marneffe</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Malvina</namePart>
      <namePart type="family">Nissim</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <originInfo>
      <publisher>Association for Computational Linguistics</publisher>
      <place>
        <placeTerm type="text">Melbourne, Australia</placeTerm>
      </place>
    </originInfo>
    <genre authority="marcgt">conference publication</genre>
  </relatedItem>
  <abstract>We investigate a new training paradigm for extractive summarization. Traditionally, human abstracts are used to derive gold-standard labels for extraction units. However, the labels are often inaccurate, because human abstracts and source documents cannot be easily aligned at the word level. In this paper we convert human abstracts to a set of Cloze-style comprehension questions. System summaries are encouraged to preserve salient source content useful for answering questions and share common words with the abstracts. We use reinforcement learning to explore the space of possible extractive summaries and introduce a question-focused reward function to promote concise, fluent, and informative summaries. Our experiments show that the proposed method is effective. It surpasses state-of-the-art systems on the standard summarization dataset.</abstract>
<identifier type="citekey">arumae-liu-2018-reinforced</identifier>
<identifier type="doi">10.18653/v1/P18-3015</identifier>
<location>
<url>https://aclanthology.org/P18-3015</url>
</location>
<part>
<date>2018-07</date>
<extent unit="page">
<start>105</start>
<end>111</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Reinforced Extractive Summarization with Question-Focused Rewards
%A Arumae, Kristjan
%A Liu, Fei
%Y Shwartz, Vered
%Y Tabassum, Jeniya
%Y Voigt, Rob
%Y Che, Wanxiang
%Y de Marneffe, Marie-Catherine
%Y Nissim, Malvina
%S Proceedings of ACL 2018, Student Research Workshop
%D 2018
%8 July
%I Association for Computational Linguistics
%C Melbourne, Australia
%F arumae-liu-2018-reinforced
%X We investigate a new training paradigm for extractive summarization. Traditionally, human abstracts are used to derive gold-standard labels for extraction units. However, the labels are often inaccurate, because human abstracts and source documents cannot be easily aligned at the word level. In this paper we convert human abstracts to a set of Cloze-style comprehension questions. System summaries are encouraged to preserve salient source content useful for answering questions and share common words with the abstracts. We use reinforcement learning to explore the space of possible extractive summaries and introduce a question-focused reward function to promote concise, fluent, and informative summaries. Our experiments show that the proposed method is effective. It surpasses state-of-the-art systems on the standard summarization dataset.
%R 10.18653/v1/P18-3015
%U https://aclanthology.org/P18-3015
%U https://doi.org/10.18653/v1/P18-3015
%P 105-111
Markdown (Informal)
[Reinforced Extractive Summarization with Question-Focused Rewards](https://aclanthology.org/P18-3015) (Arumae & Liu, ACL 2018)
ACL
Kristjan Arumae and Fei Liu. 2018. Reinforced Extractive Summarization with Question-Focused Rewards. In Proceedings of ACL 2018, Student Research Workshop, pages 105–111, Melbourne, Australia. Association for Computational Linguistics.
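
For readers skimming the record, a rough illustration of the idea described in the abstract may help: the human abstract is converted into Cloze-style questions by masking content words, and an extractive summary is rewarded for recovering the masked answers and for sharing words with the abstract. The sketch below is not the authors' implementation; the question construction, the answer check, the unigram-overlap term, and the weight `alpha` are all simplifying assumptions made for illustration, and the reinforcement-learning training loop the paper uses to optimize its reward is not shown.

```python
# Toy question-focused reward in the spirit of Arumae & Liu (2018).
# Everything here (stopword list, "longest content word" masking, alpha) is an
# illustrative assumption, not the paper's actual reward definition.
import re
from collections import Counter

STOPWORDS = {"the", "a", "an", "of", "to", "in", "and", "for", "on", "is", "are", "with"}

def make_cloze_questions(abstract: str, max_questions: int = 5):
    """Mask one content word per abstract sentence to form (question, answer) pairs."""
    questions = []
    for sentence in re.split(r"(?<=[.!?])\s+", abstract.strip()):
        tokens = [t.strip(".,") for t in sentence.split()]
        # crude stand-in for picking an informative answer span: longest content word
        candidates = [t for t in tokens if t.lower() not in STOPWORDS and len(t) > 3]
        if not candidates:
            continue
        answer = max(candidates, key=len)
        questions.append((sentence.replace(answer, "_____", 1), answer.lower()))
        if len(questions) >= max_questions:
            break
    return questions

def question_focused_reward(summary: str, abstract: str, alpha: float = 0.7) -> float:
    """Weighted sum of (i) fraction of Cloze answers found in the summary and
    (ii) unigram overlap between summary and abstract."""
    summary_tokens = Counter(w.strip(".,").lower() for w in summary.split())
    questions = make_cloze_questions(abstract)
    qa_score = (
        sum(1 for _, ans in questions if summary_tokens[ans] > 0) / len(questions)
        if questions else 0.0
    )
    abstract_tokens = Counter(w.strip(".,").lower() for w in abstract.split())
    overlap = sum((summary_tokens & abstract_tokens).values())
    overlap_score = overlap / max(1, sum(abstract_tokens.values()))
    return alpha * qa_score + (1 - alpha) * overlap_score

if __name__ == "__main__":
    reference = "The committee approved the new budget on Tuesday after a long debate."
    extract = "The committee approved the budget after debate."
    print(round(question_focused_reward(extract, reference), 3))
```

In this toy form the reward is differentiable-free and sentence-agnostic, so it could serve as the scalar signal for a policy-gradient extractor; the paper's actual reward additionally accounts for fluency and summary length.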