@inproceedings{loaiciga-etal-2021-reference,
title = "Reference and coreference in situated dialogue",
author = "Lo{\'a}iciga, Sharid and
Dobnik, Simon and
Schlangen, David",
editor = "{Xin} and
Hu, Ronghang and
Hudson, Drew and
Fu, Tsu-Jui and
Rohrbach, Marcus and
Fried, Daniel",
booktitle = "Proceedings of the Second Workshop on Advances in Language and Vision Research",
month = jun,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.alvr-1.7",
doi = "10.18653/v1/2021.alvr-1.7",
pages = "39--44",
abstract = "In recent years several corpora have been developed for vision and language tasks. We argue that there is still significant room for corpora that increase the complexity of both visual and linguistic domains and which capture different varieties of perceptual and conversational contexts. Working with two corpora approaching this goal, we present a linguistic perspective on some of the challenges in creating and extending resources combining language and vision while preserving continuity with the existing best practices in the area of coreference annotation.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="loaiciga-etal-2021-reference">
<titleInfo>
<title>Reference and coreference in situated dialogue</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sharid</namePart>
<namePart type="family">Loáiciga</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Simon</namePart>
<namePart type="family">Dobnik</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">David</namePart>
<namePart type="family">Schlangen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Second Workshop on Advances in Language and Vision Research</title>
</titleInfo>
<name type="personal">
<namePart type="given">Xin</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ronghang</namePart>
<namePart type="family">Hu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Drew</namePart>
<namePart type="family">Hudson</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tsu-Jui</namePart>
<namePart type="family">Fu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marcus</namePart>
<namePart type="family">Rohrbach</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Daniel</namePart>
<namePart type="family">Fried</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In recent years several corpora have been developed for vision and language tasks. We argue that there is still significant room for corpora that increase the complexity of both visual and linguistic domains and which capture different varieties of perceptual and conversational contexts. Working with two corpora approaching this goal, we present a linguistic perspective on some of the challenges in creating and extending resources combining language and vision while preserving continuity with the existing best practices in the area of coreference annotation.</abstract>
<identifier type="citekey">loaiciga-etal-2021-reference</identifier>
<identifier type="doi">10.18653/v1/2021.alvr-1.7</identifier>
<location>
<url>https://aclanthology.org/2021.alvr-1.7</url>
</location>
<part>
<date>2021-06</date>
<extent unit="page">
<start>39</start>
<end>44</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Reference and coreference in situated dialogue
%A Loáiciga, Sharid
%A Dobnik, Simon
%A Schlangen, David
%Y Wang, Xin
%Y Hu, Ronghang
%Y Hudson, Drew
%Y Fu, Tsu-Jui
%Y Rohrbach, Marcus
%Y Fried, Daniel
%S Proceedings of the Second Workshop on Advances in Language and Vision Research
%D 2021
%8 June
%I Association for Computational Linguistics
%C Online
%F loaiciga-etal-2021-reference
%X In recent years several corpora have been developed for vision and language tasks. We argue that there is still significant room for corpora that increase the complexity of both visual and linguistic domains and which capture different varieties of perceptual and conversational contexts. Working with two corpora approaching this goal, we present a linguistic perspective on some of the challenges in creating and extending resources combining language and vision while preserving continuity with the existing best practices in the area of coreference annotation.
%R 10.18653/v1/2021.alvr-1.7
%U https://aclanthology.org/2021.alvr-1.7
%U https://doi.org/10.18653/v1/2021.alvr-1.7
%P 39-44
Markdown (Informal)
[Reference and coreference in situated dialogue](https://aclanthology.org/2021.alvr-1.7) (Loáiciga et al., ALVR 2021)
ACL
Sharid Loáiciga, Simon Dobnik, and David Schlangen. 2021. Reference and coreference in situated dialogue. In Proceedings of the Second Workshop on Advances in Language and Vision Research, pages 39–44, Online. Association for Computational Linguistics.