@inproceedings{collins-etal-2017-supervised,
    title = "A Supervised Approach to Extractive Summarisation of Scientific Papers",
    author = "Collins, Ed and
      Augenstein, Isabelle and
      Riedel, Sebastian",
    editor = "Levy, Roger and
      Specia, Lucia",
    booktitle = "Proceedings of the 21st Conference on Computational Natural Language Learning ({C}o{NLL} 2017)",
    month = aug,
    year = "2017",
    address = "Vancouver, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/K17-1021",
    doi = "10.18653/v1/K17-1021",
    pages = "195--205",
abstract = "Automatic summarisation is a popular approach to reduce a document to its main arguments. Recent research in the area has focused on neural approaches to summarisation, which can be very data-hungry. However, few large datasets exist and none for the traditionally popular domain of scientific publications, which opens up challenging research avenues centered on encoding large, complex documents. In this paper, we introduce a new dataset for summarisation of computer science publications by exploiting a large resource of author provided summaries and show straightforward ways of extending it further. We develop models on the dataset making use of both neural sentence encoding and traditionally used summarisation features and show that models which encode sentences as well as their local and global context perform best, significantly outperforming well-established baseline methods.",
}
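
The BibTeX record above keeps every field in the Anthology's `key = "value"` style; the one exception is the unquoted month macro, `month = aug`. A minimal sketch of reading those fields with the Python standard library, assuming double-quoted values with no escaped quotes (the `parse_entry` helper is ours; a real parser such as bibtexparser would also handle brace-delimited values and macros):

```python
import re

# Pulls the `key = "value"` fields out of an entry like the one above.
# Deliberately minimal, not a full BibTeX parser: brace-delimited values
# and unquoted macros such as `month = aug` are simply skipped.
FIELD = re.compile(r'(\w+)\s*=\s*"([^"]*)"')

def parse_entry(bibtex: str) -> dict:
    fields = dict(FIELD.findall(bibtex))
    # Author/editor lists are "and"-separated "Family, Given" names,
    # possibly wrapped across lines, so split on whitespace-padded "and".
    for key in ("author", "editor"):
        if key in fields:
            fields[key] = re.split(r"\s+and\s+", fields[key])
    return fields

entry = '''@inproceedings{collins-etal-2017-supervised,
    title = "A Supervised Approach to Extractive Summarisation of Scientific Papers",
    author = "Collins, Ed and
      Augenstein, Isabelle and
      Riedel, Sebastian",
    year = "2017",
    pages = "195--205",
}'''

fields = parse_entry(entry)
print(fields["title"])
print(fields["author"])  # ['Collins, Ed', 'Augenstein, Isabelle', 'Riedel, Sebastian']
```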
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="collins-etal-2017-supervised">
    <titleInfo>
      <title>A Supervised Approach to Extractive Summarisation of Scientific Papers</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Ed</namePart>
      <namePart type="family">Collins</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Isabelle</namePart>
      <namePart type="family">Augenstein</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Sebastian</namePart>
      <namePart type="family">Riedel</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2017-08</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 21st Conference on Computational Natural Language Learning (CoNLL 2017)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Roger</namePart>
        <namePart type="family">Levy</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Lucia</namePart>
        <namePart type="family">Specia</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Vancouver, Canada</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Automatic summarisation is a popular approach to reduce a document to its main arguments. Recent research in the area has focused on neural approaches to summarisation, which can be very data-hungry. However, few large datasets exist and none for the traditionally popular domain of scientific publications, which opens up challenging research avenues centered on encoding large, complex documents. In this paper, we introduce a new dataset for summarisation of computer science publications by exploiting a large resource of author-provided summaries and show straightforward ways of extending it further. We develop models on the dataset making use of both neural sentence encoding and traditionally used summarisation features and show that models which encode sentences as well as their local and global context perform best, significantly outperforming well-established baseline methods.</abstract>
<identifier type="citekey">collins-etal-2017-supervised</identifier>
<identifier type="doi">10.18653/v1/K17-1021</identifier>
<location>
<url>https://aclanthology.org/K17-1021</url>
</location>
<part>
<date>2017-08</date>
<extent unit="page">
<start>195</start>
<end>205</end>
</extent>
</part>
</mods>
</modsCollection>
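
The MODS serialisation carries the same metadata under the MODS v3 namespace declared on `modsCollection`, so it can be read with `xml.etree.ElementTree` alone. A sketch against a trimmed copy of the record (one author kept for brevity):

```python
import xml.etree.ElementTree as ET

# MODS v3 namespace, as declared on <modsCollection> above.
NS = {"m": "http://www.loc.gov/mods/v3"}

# Trimmed copy of the record above (one author kept; the XML declaration
# is dropped because ET.fromstring rejects a str with an encoding
# declaration -- pass bytes if you keep it).
MODS = '''<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="collins-etal-2017-supervised">
    <titleInfo>
      <title>A Supervised Approach to Extractive Summarisation of Scientific Papers</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Ed</namePart>
      <namePart type="family">Collins</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <identifier type="doi">10.18653/v1/K17-1021</identifier>
  </mods>
</modsCollection>'''

mods = ET.fromstring(MODS).find("m:mods", NS)

title = mods.findtext("m:titleInfo/m:title", namespaces=NS)
# Keep every <name> whose roleTerm is "author"; the given/family
# <namePart> pieces appear in that order, so joining yields "Ed Collins".
authors = [
    " ".join(part.text for part in name.findall("m:namePart", NS))
    for name in mods.findall("m:name", NS)
    if name.findtext("m:role/m:roleTerm", namespaces=NS) == "author"
]
doi = mods.findtext('m:identifier[@type="doi"]', namespaces=NS)

print(title)    # A Supervised Approach to Extractive Summarisation ...
print(authors)  # ['Ed Collins']
print(doi)      # 10.18653/v1/K17-1021
```

The `NS` mapping is the only MODS-specific detail; everything else is the limited XPath that ElementTree supports out of the box.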
%0 Conference Proceedings
%T A Supervised Approach to Extractive Summarisation of Scientific Papers
%A Collins, Ed
%A Augenstein, Isabelle
%A Riedel, Sebastian
%Y Levy, Roger
%Y Specia, Lucia
%S Proceedings of the 21st Conference on Computational Natural Language Learning (CoNLL 2017)
%D 2017
%8 August
%I Association for Computational Linguistics
%C Vancouver, Canada
%F collins-etal-2017-supervised
%X Automatic summarisation is a popular approach to reduce a document to its main arguments. Recent research in the area has focused on neural approaches to summarisation, which can be very data-hungry. However, few large datasets exist and none for the traditionally popular domain of scientific publications, which opens up challenging research avenues centered on encoding large, complex documents. In this paper, we introduce a new dataset for summarisation of computer science publications by exploiting a large resource of author-provided summaries and show straightforward ways of extending it further. We develop models on the dataset making use of both neural sentence encoding and traditionally used summarisation features and show that models which encode sentences as well as their local and global context perform best, significantly outperforming well-established baseline methods.
%R 10.18653/v1/K17-1021
%U https://aclanthology.org/K17-1021
%U https://doi.org/10.18653/v1/K17-1021
%P 195-205
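
The %-tagged block above is the old refer/EndNote exchange format: one field per line, a single-letter tag after the %, and tags repeated for repeatable fields (here %A for authors, %Y for editors, %U for URLs). A minimal sketch that folds such a record into a dict; which tags repeat varies by exporter, so the `REPEATABLE` set below is only what this record needs:

```python
# Tags that appear more than once in this export (%A authors,
# %Y editors, %U urls); everything else is treated as single-valued.
REPEATABLE = {"A", "Y", "U"}

def parse_refer(text: str) -> dict:
    record: dict = {}
    for line in text.strip().splitlines():
        # Each line is "%<tag> <value>"; split off the tag at the first space.
        tag, _, value = line.partition(" ")
        tag = tag.lstrip("%")
        if tag in REPEATABLE:
            record.setdefault(tag, []).append(value)
        else:
            record[tag] = value
    return record

sample = """%0 Conference Proceedings
%T A Supervised Approach to Extractive Summarisation of Scientific Papers
%A Collins, Ed
%A Augenstein, Isabelle
%A Riedel, Sebastian
%D 2017
%P 195-205"""

ref = parse_refer(sample)
print(ref["T"])
print(ref["A"])  # ['Collins, Ed', 'Augenstein, Isabelle', 'Riedel, Sebastian']
```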
Markdown (Informal)
[A Supervised Approach to Extractive Summarisation of Scientific Papers](https://aclanthology.org/K17-1021) (Collins et al., CoNLL 2017)
ACL
Ed Collins, Isabelle Augenstein, and Sebastian Riedel. 2017. A Supervised Approach to Extractive Summarisation of Scientific Papers. In Proceedings of the 21st Conference on Computational Natural Language Learning (CoNLL 2017), pages 195–205, Vancouver, Canada. Association for Computational Linguistics.