@inproceedings{van-dongen-etal-2020-schubert,
title = "{SC}hu{BERT}: Scholarly Document Chunks with {BERT}-encoding boost Citation Count Prediction.",
author = "van Dongen, Thomas and
Maillette de Buy Wenniger, Gideon and
Schomaker, Lambert",
editor = "Chandrasekaran, Muthu Kumar and
de Waard, Anita and
Feigenblat, Guy and
Freitag, Dayne and
Ghosal, Tirthankar and
Hovy, Eduard and
Knoth, Petr and
Konopnicki, David and
Mayr, Philipp and
Patton, Robert M. and
Shmueli-Scheuer, Michal",
booktitle = "Proceedings of the First Workshop on Scholarly Document Processing",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.sdp-1.17/",
doi = "10.18653/v1/2020.sdp-1.17",
pages = "148--157",
abstract = "Predicting the number of citations of scholarly documents is an upcoming task in scholarly document processing. Besides the intrinsic merit of this information, it also has a wider use as an imperfect proxy for quality which has the advantage of being cheaply available for large volumes of scholarly documents. Previous work has dealt with number of citations prediction with relatively small training data sets, or larger datasets but with short, incomplete input text. In this work we leverage the open access ACL Anthology collection in combination with the Semantic Scholar bibliometric database to create a large corpus of scholarly documents with associated citation information and we propose a new citation prediction model called SChuBERT. In our experiments we compare SChuBERT with several state-of-the-art citation prediction models and show that it outperforms previous methods by a large margin. We also show the merit of using more training data and longer input for number of citations prediction."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="van-dongen-etal-2020-schubert">
<titleInfo>
<title>SChuBERT: Scholarly Document Chunks with BERT-encoding boost Citation Count Prediction.</title>
</titleInfo>
<name type="personal">
<namePart type="given">Thomas</namePart>
<namePart type="family">van Dongen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Gideon</namePart>
<namePart type="family">Maillette de Buy Wenniger</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lambert</namePart>
<namePart type="family">Schomaker</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First Workshop on Scholarly Document Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Muthu</namePart>
<namePart type="given">Kumar</namePart>
<namePart type="family">Chandrasekaran</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anita</namePart>
<namePart type="family">de Waard</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Guy</namePart>
<namePart type="family">Feigenblat</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dayne</namePart>
<namePart type="family">Freitag</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tirthankar</namePart>
<namePart type="family">Ghosal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Eduard</namePart>
<namePart type="family">Hovy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Petr</namePart>
<namePart type="family">Knoth</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">David</namePart>
<namePart type="family">Konopnicki</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Philipp</namePart>
<namePart type="family">Mayr</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Robert</namePart>
<namePart type="given">M</namePart>
<namePart type="family">Patton</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Michal</namePart>
<namePart type="family">Shmueli-Scheuer</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Predicting the number of citations of scholarly documents is an upcoming task in scholarly document processing. Besides the intrinsic merit of this information, it also has a wider use as an imperfect proxy for quality which has the advantage of being cheaply available for large volumes of scholarly documents. Previous work has dealt with number of citations prediction with relatively small training data sets, or larger datasets but with short, incomplete input text. In this work we leverage the open access ACL Anthology collection in combination with the Semantic Scholar bibliometric database to create a large corpus of scholarly documents with associated citation information and we propose a new citation prediction model called SChuBERT. In our experiments we compare SChuBERT with several state-of-the-art citation prediction models and show that it outperforms previous methods by a large margin. We also show the merit of using more training data and longer input for number of citations prediction.</abstract>
<identifier type="citekey">van-dongen-etal-2020-schubert</identifier>
<identifier type="doi">10.18653/v1/2020.sdp-1.17</identifier>
<location>
<url>https://aclanthology.org/2020.sdp-1.17/</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>148</start>
<end>157</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T SChuBERT: Scholarly Document Chunks with BERT-encoding boost Citation Count Prediction.
%A van Dongen, Thomas
%A Maillette de Buy Wenniger, Gideon
%A Schomaker, Lambert
%Y Chandrasekaran, Muthu Kumar
%Y de Waard, Anita
%Y Feigenblat, Guy
%Y Freitag, Dayne
%Y Ghosal, Tirthankar
%Y Hovy, Eduard
%Y Knoth, Petr
%Y Konopnicki, David
%Y Mayr, Philipp
%Y Patton, Robert M.
%Y Shmueli-Scheuer, Michal
%S Proceedings of the First Workshop on Scholarly Document Processing
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F van-dongen-etal-2020-schubert
%X Predicting the number of citations of scholarly documents is an upcoming task in scholarly document processing. Besides the intrinsic merit of this information, it also has a wider use as an imperfect proxy for quality which has the advantage of being cheaply available for large volumes of scholarly documents. Previous work has dealt with number of citations prediction with relatively small training data sets, or larger datasets but with short, incomplete input text. In this work we leverage the open access ACL Anthology collection in combination with the Semantic Scholar bibliometric database to create a large corpus of scholarly documents with associated citation information and we propose a new citation prediction model called SChuBERT. In our experiments we compare SChuBERT with several state-of-the-art citation prediction models and show that it outperforms previous methods by a large margin. We also show the merit of using more training data and longer input for number of citations prediction.
%R 10.18653/v1/2020.sdp-1.17
%U https://aclanthology.org/2020.sdp-1.17/
%U https://doi.org/10.18653/v1/2020.sdp-1.17
%P 148-157
Markdown (Informal)
[SChuBERT: Scholarly Document Chunks with BERT-encoding boost Citation Count Prediction.](https://aclanthology.org/2020.sdp-1.17/) (van Dongen et al., sdp 2020)
ACL
Thomas van Dongen, Gideon Maillette de Buy Wenniger, and Lambert Schomaker. 2020. SChuBERT: Scholarly Document Chunks with BERT-encoding boost Citation Count Prediction. In Proceedings of the First Workshop on Scholarly Document Processing, pages 148–157, Online. Association for Computational Linguistics.