@inproceedings{anthonio-roth-2021-resolving,
title = "Resolving Implicit References in Instructional Texts",
author = "Anthonio, Talita and
Roth, Michael",
editor = "Braud, Chlo{\'e} and
Hardmeier, Christian and
Li, Junyi Jessy and
Louis, Annie and
Strube, Michael and
Zeldes, Amir",
booktitle = "Proceedings of the 2nd Workshop on Computational Approaches to Discourse",
month = nov,
year = "2021",
address = "Punta Cana, Dominican Republic and Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.codi-main.6",
doi = "10.18653/v1/2021.codi-main.6",
pages = "58--71",
abstract = "The usage of (co-)referring expressions in discourse contributes to the coherence of a text. However, text comprehension can be difficult when referring expressions are non-verbalized and have to be resolved in the discourse context. In this paper, we propose a novel dataset of such implicit references, which we automatically derive from insertions of references in collaboratively edited how-to guides. Our dataset consists of 6,014 instances, making it one of the largest datasets of implicit references and a useful starting point to investigate misunderstandings caused by underspecified language. We test different methods for resolving implicit references in our dataset based on the Generative Pre-trained Transformer model (GPT) and compare them to heuristic baselines. Our experiments indicate that GPT can accurately resolve the majority of implicit references in our data. Finally, we investigate remaining errors and examine human preferences regarding different resolutions of an implicit reference given the discourse context.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="anthonio-roth-2021-resolving">
    <titleInfo>
      <title>Resolving Implicit References in Instructional Texts</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Talita</namePart>
      <namePart type="family">Anthonio</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Michael</namePart>
      <namePart type="family">Roth</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2nd Workshop on Computational Approaches to Discourse</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Chloé</namePart>
        <namePart type="family">Braud</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Christian</namePart>
        <namePart type="family">Hardmeier</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Junyi</namePart>
        <namePart type="given">Jessy</namePart>
        <namePart type="family">Li</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Annie</namePart>
        <namePart type="family">Louis</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Michael</namePart>
        <namePart type="family">Strube</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Amir</namePart>
        <namePart type="family">Zeldes</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Punta Cana, Dominican Republic and Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>The usage of (co-)referring expressions in discourse contributes to the coherence of a text. However, text comprehension can be difficult when referring expressions are non-verbalized and have to be resolved in the discourse context. In this paper, we propose a novel dataset of such implicit references, which we automatically derive from insertions of references in collaboratively edited how-to guides. Our dataset consists of 6,014 instances, making it one of the largest datasets of implicit references and a useful starting point to investigate misunderstandings caused by underspecified language. We test different methods for resolving implicit references in our dataset based on the Generative Pre-trained Transformer model (GPT) and compare them to heuristic baselines. Our experiments indicate that GPT can accurately resolve the majority of implicit references in our data. Finally, we investigate remaining errors and examine human preferences regarding different resolutions of an implicit reference given the discourse context.</abstract>
    <identifier type="citekey">anthonio-roth-2021-resolving</identifier>
    <identifier type="doi">10.18653/v1/2021.codi-main.6</identifier>
    <location>
      <url>https://aclanthology.org/2021.codi-main.6</url>
    </location>
    <part>
      <date>2021-11</date>
      <extent unit="page">
        <start>58</start>
        <end>71</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Resolving Implicit References in Instructional Texts
%A Anthonio, Talita
%A Roth, Michael
%Y Braud, Chloé
%Y Hardmeier, Christian
%Y Li, Junyi Jessy
%Y Louis, Annie
%Y Strube, Michael
%Y Zeldes, Amir
%S Proceedings of the 2nd Workshop on Computational Approaches to Discourse
%D 2021
%8 November
%I Association for Computational Linguistics
%C Punta Cana, Dominican Republic and Online
%F anthonio-roth-2021-resolving
%X The usage of (co-)referring expressions in discourse contributes to the coherence of a text. However, text comprehension can be difficult when referring expressions are non-verbalized and have to be resolved in the discourse context. In this paper, we propose a novel dataset of such implicit references, which we automatically derive from insertions of references in collaboratively edited how-to guides. Our dataset consists of 6,014 instances, making it one of the largest datasets of implicit references and a useful starting point to investigate misunderstandings caused by underspecified language. We test different methods for resolving implicit references in our dataset based on the Generative Pre-trained Transformer model (GPT) and compare them to heuristic baselines. Our experiments indicate that GPT can accurately resolve the majority of implicit references in our data. Finally, we investigate remaining errors and examine human preferences regarding different resolutions of an implicit reference given the discourse context.
%R 10.18653/v1/2021.codi-main.6
%U https://aclanthology.org/2021.codi-main.6
%U https://doi.org/10.18653/v1/2021.codi-main.6
%P 58-71
Markdown (Informal)
[Resolving Implicit References in Instructional Texts](https://aclanthology.org/2021.codi-main.6) (Anthonio & Roth, CODI 2021)

ACL
Talita Anthonio and Michael Roth. 2021. Resolving Implicit References in Instructional Texts. In Proceedings of the 2nd Workshop on Computational Approaches to Discourse, pages 58–71, Punta Cana, Dominican Republic and Online. Association for Computational Linguistics.