@inproceedings{stylianou-vlahavas-2021-corelm,
title = "{C}ore{LM}: Coreference-aware Language Model Fine-Tuning",
author = "Stylianou, Nikolaos and
Vlahavas, Ioannis",
editor = "Ogrodniczuk, Maciej and
Pradhan, Sameer and
Poesio, Massimo and
Grishina, Yulia and
Ng, Vincent",
booktitle = "Proceedings of the Fourth Workshop on Computational Models of Reference, Anaphora and Coreference",
month = nov,
year = "2021",
address = "Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.crac-1.8",
doi = "10.18653/v1/2021.crac-1.8",
pages = "70--81",
abstract = "Language Models are the underpin of all modern Natural Language Processing (NLP) tasks. The introduction of the Transformers architecture has contributed significantly into making Language Modeling very effective across many NLP task, leading to significant advancements in the field. However, Transformers come with a big computational cost, which grows quadratically with respect to the input length. This presents a challenge as to understand long texts requires a lot of context. In this paper, we propose a Fine-Tuning framework, named CoreLM, that extends the architecture of current Pretrained Language Models so that they incorporate explicit entity information. By introducing entity representations, we make available information outside the contextual space of the model, which results in a better Language Model for a fraction of the computational cost. We implement our approach using GPT2 and compare the fine-tuned model to the original. Our proposed model achieves a lower Perplexity in GUMBY and LAMBDADA datasets when compared to GPT2 and a fine-tuned version of GPT2 without any changes. We also compare the models{'} performance in terms of Accuracy in LAMBADA and Children{'}s Book Test, with and without the use of model-created coreference annotations.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="stylianou-vlahavas-2021-corelm">
<titleInfo>
<title>CoreLM: Coreference-aware Language Model Fine-Tuning</title>
</titleInfo>
<name type="personal">
<namePart type="given">Nikolaos</namePart>
<namePart type="family">Stylianou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ioannis</namePart>
<namePart type="family">Vlahavas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Fourth Workshop on Computational Models of Reference, Anaphora and Coreference</title>
</titleInfo>
<name type="personal">
<namePart type="given">Maciej</namePart>
<namePart type="family">Ogrodniczuk</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sameer</namePart>
<namePart type="family">Pradhan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Massimo</namePart>
<namePart type="family">Poesio</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yulia</namePart>
<namePart type="family">Grishina</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vincent</namePart>
<namePart type="family">Ng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Punta Cana, Dominican Republic</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Language Models are the underpinning of all modern Natural Language Processing (NLP) tasks. The introduction of the Transformer architecture has contributed significantly to making Language Modeling very effective across many NLP tasks, leading to significant advancements in the field. However, Transformers come with a large computational cost, which grows quadratically with respect to the input length. This presents a challenge, as understanding long texts requires a lot of context. In this paper, we propose a Fine-Tuning framework, named CoreLM, that extends the architecture of current Pretrained Language Models so that they incorporate explicit entity information. By introducing entity representations, we make available information outside the contextual space of the model, which results in a better Language Model for a fraction of the computational cost. We implement our approach using GPT2 and compare the fine-tuned model to the original. Our proposed model achieves a lower Perplexity on the GUMBY and LAMBADA datasets when compared to GPT2 and a fine-tuned version of GPT2 without any changes. We also compare the models’ performance in terms of Accuracy on LAMBADA and the Children’s Book Test, with and without the use of model-created coreference annotations.</abstract>
<identifier type="citekey">stylianou-vlahavas-2021-corelm</identifier>
<identifier type="doi">10.18653/v1/2021.crac-1.8</identifier>
<location>
<url>https://aclanthology.org/2021.crac-1.8</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>70</start>
<end>81</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T CoreLM: Coreference-aware Language Model Fine-Tuning
%A Stylianou, Nikolaos
%A Vlahavas, Ioannis
%Y Ogrodniczuk, Maciej
%Y Pradhan, Sameer
%Y Poesio, Massimo
%Y Grishina, Yulia
%Y Ng, Vincent
%S Proceedings of the Fourth Workshop on Computational Models of Reference, Anaphora and Coreference
%D 2021
%8 November
%I Association for Computational Linguistics
%C Punta Cana, Dominican Republic
%F stylianou-vlahavas-2021-corelm
%X Language Models are the underpinning of all modern Natural Language Processing (NLP) tasks. The introduction of the Transformer architecture has contributed significantly to making Language Modeling very effective across many NLP tasks, leading to significant advancements in the field. However, Transformers come with a large computational cost, which grows quadratically with respect to the input length. This presents a challenge, as understanding long texts requires a lot of context. In this paper, we propose a Fine-Tuning framework, named CoreLM, that extends the architecture of current Pretrained Language Models so that they incorporate explicit entity information. By introducing entity representations, we make available information outside the contextual space of the model, which results in a better Language Model for a fraction of the computational cost. We implement our approach using GPT2 and compare the fine-tuned model to the original. Our proposed model achieves a lower Perplexity on the GUMBY and LAMBADA datasets when compared to GPT2 and a fine-tuned version of GPT2 without any changes. We also compare the models’ performance in terms of Accuracy on LAMBADA and the Children’s Book Test, with and without the use of model-created coreference annotations.
%R 10.18653/v1/2021.crac-1.8
%U https://aclanthology.org/2021.crac-1.8
%U https://doi.org/10.18653/v1/2021.crac-1.8
%P 70-81
Markdown (Informal)
[CoreLM: Coreference-aware Language Model Fine-Tuning](https://aclanthology.org/2021.crac-1.8) (Stylianou & Vlahavas, CRAC 2021)
ACL
Nikolaos Stylianou and Ioannis Vlahavas. 2021. CoreLM: Coreference-aware Language Model Fine-Tuning. In Proceedings of the Fourth Workshop on Computational Models of Reference, Anaphora and Coreference, pages 70–81, Punta Cana, Dominican Republic. Association for Computational Linguistics.
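
As a rough illustration of the idea described in the abstract (making explicit entity information available to a pretrained language model during fine-tuning), the sketch below shows one way to add learned coreference-cluster embeddings to GPT-2's token embeddings. It is a hypothetical, heavily simplified example, not the authors' CoreLM implementation; the `EntityAwareGPT2` wrapper, the cluster-id scheme, and the `max_clusters` parameter are assumptions made for illustration only.

```python
# Hypothetical sketch only -- NOT the authors' CoreLM code. It illustrates one
# way to inject explicit entity information into GPT-2: a learned embedding per
# coreference-cluster id is added to the token embeddings before fine-tuning.
import torch
import torch.nn as nn
from transformers import GPT2LMHeadModel, GPT2TokenizerFast


class EntityAwareGPT2(nn.Module):
    """GPT-2 wrapper that mixes entity-cluster embeddings into the input."""

    def __init__(self, model_name: str = "gpt2", max_clusters: int = 128):
        super().__init__()
        self.lm = GPT2LMHeadModel.from_pretrained(model_name)
        hidden = self.lm.config.n_embd
        # Cluster id 0 means "no entity" and stays a zero vector (padding_idx).
        self.entity_emb = nn.Embedding(max_clusters, hidden, padding_idx=0)

    def forward(self, input_ids, cluster_ids, labels=None):
        tok = self.lm.transformer.wte(input_ids)  # ordinary token embeddings
        ent = self.entity_emb(cluster_ids)        # per-token entity embeddings
        return self.lm(inputs_embeds=tok + ent, labels=labels)


tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
model = EntityAwareGPT2()

enc = tokenizer("Alice met Bob. She greeted him.", return_tensors="pt")
# In practice the per-token cluster ids would come from a coreference resolver
# (e.g. "Alice"/"She" in cluster 1, "Bob"/"him" in cluster 2); they are left as
# all-zeros here only so the example runs end to end.
cluster_ids = torch.zeros_like(enc["input_ids"])

out = model(enc["input_ids"], cluster_ids, labels=enc["input_ids"])
print(float(out.loss))  # language-modeling loss to minimize during fine-tuning
```

A real fine-tuning run would backpropagate this loss over coreference-annotated text; the sketch only demonstrates the forward pass under the assumptions stated above.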