BibTeX
@inproceedings{urbizu-etal-2020-sequence,
title = "Sequence to Sequence Coreference Resolution",
author = "Urbizu, Gorka and
Soraluze, Ander and
Arregi, Olatz",
editor = "Ogrodniczuk, Maciej and
Ng, Vincent and
Grishina, Yulia and
Pradhan, Sameer",
booktitle = "Proceedings of the Third Workshop on Computational Models of Reference, Anaphora and Coreference",
month = dec,
year = "2020",
address = "Barcelona, Spain (online)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.crac-1.5/",
pages = "39--46",
abstract = "Until recently, coreference resolution has been a critical task on the pipeline of any NLP task involving deep language understanding, such as machine translation, chatbots, summarization or sentiment analysis. However, nowadays, those end tasks are learned end-to-end by deep neural networks without adding any explicit knowledge about coreference. Thus, coreference resolution is used less in the training of other NLP tasks or trending pretrained language models. In this paper we present a new approach to face coreference resolution as a sequence to sequence task based on the Transformer architecture. This approach is simple and universal, compatible with any language or dataset (regardless of singletons) and easier to integrate with current language models architectures. We test it on the ARRAU corpus, where we get 65.6 F1 CoNLL. We see this approach not as a final goal, but a means to pretrain sequence to sequence language models (T5) on coreference resolution."
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="urbizu-etal-2020-sequence">
    <titleInfo>
      <title>Sequence to Sequence Coreference Resolution</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Gorka</namePart>
      <namePart type="family">Urbizu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ander</namePart>
      <namePart type="family">Soraluze</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Olatz</namePart>
      <namePart type="family">Arregi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Third Workshop on Computational Models of Reference, Anaphora and Coreference</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Maciej</namePart>
        <namePart type="family">Ogrodniczuk</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Vincent</namePart>
        <namePart type="family">Ng</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yulia</namePart>
        <namePart type="family">Grishina</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Sameer</namePart>
        <namePart type="family">Pradhan</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Barcelona, Spain (online)</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Until recently, coreference resolution has been a critical task in the pipeline of any NLP task involving deep language understanding, such as machine translation, chatbots, summarization or sentiment analysis. However, nowadays, those end tasks are learned end-to-end by deep neural networks without adding any explicit knowledge about coreference. Thus, coreference resolution is used less in the training of other NLP tasks or trending pretrained language models. In this paper we present a new approach that casts coreference resolution as a sequence to sequence task based on the Transformer architecture. This approach is simple and universal, compatible with any language or dataset (regardless of singletons) and easier to integrate with current language model architectures. We test it on the ARRAU corpus, where we obtain 65.6 CoNLL F1. We see this approach not as a final goal, but as a means to pretrain sequence to sequence language models (T5) on coreference resolution.</abstract>
    <identifier type="citekey">urbizu-etal-2020-sequence</identifier>
    <location>
      <url>https://aclanthology.org/2020.crac-1.5/</url>
    </location>
    <part>
      <date>2020-12</date>
      <extent unit="page">
        <start>39</start>
        <end>46</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Sequence to Sequence Coreference Resolution
%A Urbizu, Gorka
%A Soraluze, Ander
%A Arregi, Olatz
%Y Ogrodniczuk, Maciej
%Y Ng, Vincent
%Y Grishina, Yulia
%Y Pradhan, Sameer
%S Proceedings of the Third Workshop on Computational Models of Reference, Anaphora and Coreference
%D 2020
%8 December
%I Association for Computational Linguistics
%C Barcelona, Spain (online)
%F urbizu-etal-2020-sequence
%X Until recently, coreference resolution has been a critical task in the pipeline of any NLP task involving deep language understanding, such as machine translation, chatbots, summarization or sentiment analysis. However, nowadays, those end tasks are learned end-to-end by deep neural networks without adding any explicit knowledge about coreference. Thus, coreference resolution is used less in the training of other NLP tasks or trending pretrained language models. In this paper we present a new approach that casts coreference resolution as a sequence to sequence task based on the Transformer architecture. This approach is simple and universal, compatible with any language or dataset (regardless of singletons) and easier to integrate with current language model architectures. We test it on the ARRAU corpus, where we obtain 65.6 CoNLL F1. We see this approach not as a final goal, but as a means to pretrain sequence to sequence language models (T5) on coreference resolution.
%U https://aclanthology.org/2020.crac-1.5/
%P 39-46
Markdown (Informal)
[Sequence to Sequence Coreference Resolution](https://aclanthology.org/2020.crac-1.5/) (Urbizu et al., CRAC 2020)
ACL
Gorka Urbizu, Ander Soraluze, and Olatz Arregi. 2020. Sequence to Sequence Coreference Resolution. In Proceedings of the Third Workshop on Computational Models of Reference, Anaphora and Coreference, pages 39–46, Barcelona, Spain (online). Association for Computational Linguistics.