@inproceedings{tang-etal-2024-grounding,
  title     = {Grounding Language in Multi-Perspective Referential Communication},
  author    = {Tang, Zineng and
               Mao, Lingjun and
               Suhr, Alane},
  editor    = {Al-Onaizan, Yaser and
               Bansal, Mohit and
               Chen, Yun-Nung},
  booktitle = {Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing},
  month     = nov,
  year      = {2024},
  address   = {Miami, Florida, USA},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.emnlp-main.1100},
  doi       = {10.18653/v1/2024.emnlp-main.1100},
  pages     = {19727--19741},
  abstract  = {We introduce a task and dataset for referring expression generation and comprehension in multi-agent embodied environments. In this task, two agents in a shared scene must take into account one another{'}s visual perspective, which may be different from their own, to both produce and understand references to objects in a scene and the spatial relations between them. We collect a dataset of 2,970 human-written referring expressions, each paired with human comprehension judgments, and evaluate the performance of automated models as speakers and listeners paired with human partners, finding that model performance in both reference generation and comprehension lags behind that of pairs of human agents. Finally, we experiment training an open-weight speaker model with evidence of communicative success when paired with a listener, resulting in an improvement from 58.9 to 69.3{\%} in communicative success and even outperforming the strongest proprietary model.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="tang-etal-2024-grounding">
<titleInfo>
<title>Grounding Language in Multi-Perspective Referential Communication</title>
</titleInfo>
<name type="personal">
<namePart type="given">Zineng</namePart>
<namePart type="family">Tang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lingjun</namePart>
<namePart type="family">Mao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alane</namePart>
<namePart type="family">Suhr</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yaser</namePart>
<namePart type="family">Al-Onaizan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohit</namePart>
<namePart type="family">Bansal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yun-Nung</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Miami, Florida, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We introduce a task and dataset for referring expression generation and comprehension in multi-agent embodied environments. In this task, two agents in a shared scene must take into account one another’s visual perspective, which may be different from their own, to both produce and understand references to objects in a scene and the spatial relations between them. We collect a dataset of 2,970 human-written referring expressions, each paired with human comprehension judgments, and evaluate the performance of automated models as speakers and listeners paired with human partners, finding that model performance in both reference generation and comprehension lags behind that of pairs of human agents. Finally, we experiment training an open-weight speaker model with evidence of communicative success when paired with a listener, resulting in an improvement from 58.9 to 69.3% in communicative success and even outperforming the strongest proprietary model.</abstract>
<identifier type="citekey">tang-etal-2024-grounding</identifier>
<identifier type="doi">10.18653/v1/2024.emnlp-main.1100</identifier>
<location>
<url>https://aclanthology.org/2024.emnlp-main.1100</url>
</location>
<part>
<date>2024-11</date>
<extent unit="page">
<start>19727</start>
<end>19741</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Grounding Language in Multi-Perspective Referential Communication
%A Tang, Zineng
%A Mao, Lingjun
%A Suhr, Alane
%Y Al-Onaizan, Yaser
%Y Bansal, Mohit
%Y Chen, Yun-Nung
%S Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, Florida, USA
%F tang-etal-2024-grounding
%X We introduce a task and dataset for referring expression generation and comprehension in multi-agent embodied environments. In this task, two agents in a shared scene must take into account one another’s visual perspective, which may be different from their own, to both produce and understand references to objects in a scene and the spatial relations between them. We collect a dataset of 2,970 human-written referring expressions, each paired with human comprehension judgments, and evaluate the performance of automated models as speakers and listeners paired with human partners, finding that model performance in both reference generation and comprehension lags behind that of pairs of human agents. Finally, we experiment training an open-weight speaker model with evidence of communicative success when paired with a listener, resulting in an improvement from 58.9 to 69.3% in communicative success and even outperforming the strongest proprietary model.
%R 10.18653/v1/2024.emnlp-main.1100
%U https://aclanthology.org/2024.emnlp-main.1100
%U https://doi.org/10.18653/v1/2024.emnlp-main.1100
%P 19727-19741
Markdown (Informal)
[Grounding Language in Multi-Perspective Referential Communication](https://aclanthology.org/2024.emnlp-main.1100) (Tang et al., EMNLP 2024)
ACL