@inproceedings{zhu-etal-2025-multimodal-common,
  title     = {Multimodal Common Ground Annotation for Partial Information Collaborative Problem Solving},
  author    = {Zhu, Yifan and
               Jung, Changsoo and
               Lai, Kenneth and
               Venkatesha, Videep and
               Bradford, Mariah and
               Fitzgerald, Jack and
               Jamil, Huma and
               Graff, Carine and
               Kumar, Sai Kiran Ganesh and
               Draper, Bruce and
               Blanchard, Nathaniel and
               Pustejovsky, James and
               Krishnaswamy, Nikhil},
  editor    = {Bunt, Harry},
  booktitle = {Proceedings of the 21st Joint {ACL} - {ISO} Workshop on Interoperable Semantic Annotation ({ISA}-21)},
  month     = sep,
  year      = {2025},
  address   = {D{\"u}sseldorf, Germany},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.isa-1.9/},
  pages     = {85--91},
  isbn      = {979-8-89176-319-7},
  abstract  = {This project note describes challenges and procedures undertaken in annotating an audiovisual dataset capturing a multimodal situated collaborative construction task. In the task, all participants begin with different partial information, and must collaborate using speech, gesture, and action to arrive a solution that satisfies all individual pieces of private information. This rich data poses a number of annotation challenges, from small objects in a close space, to the implicit and multimodal fashion in which participants express agreement, disagreement, and beliefs. We discuss the data collection procedure, annotation schemas and tools, and future use cases.},
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhu-etal-2025-multimodal-common">
<titleInfo>
<title>Multimodal Common Ground Annotation for Partial Information Collaborative Problem Solving</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yifan</namePart>
<namePart type="family">Zhu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Changsoo</namePart>
<namePart type="family">Jung</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kenneth</namePart>
<namePart type="family">Lai</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Videep</namePart>
<namePart type="family">Venkatesha</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mariah</namePart>
<namePart type="family">Bradford</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jack</namePart>
<namePart type="family">Fitzgerald</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Huma</namePart>
<namePart type="family">Jamil</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carine</namePart>
<namePart type="family">Graff</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sai</namePart>
<namePart type="given">Kiran</namePart>
<namePart type="given">Ganesh</namePart>
<namePart type="family">Kumar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bruce</namePart>
<namePart type="family">Draper</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nathaniel</namePart>
<namePart type="family">Blanchard</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">James</namePart>
<namePart type="family">Pustejovsky</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nikhil</namePart>
<namePart type="family">Krishnaswamy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 21st Joint ACL - ISO Workshop on Interoperable Semantic Annotation (ISA-21)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Harry</namePart>
<namePart type="family">Bunt</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Düsseldorf, Germany</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-319-7</identifier>
</relatedItem>
<abstract>This project note describes challenges and procedures undertaken in annotating an audiovisual dataset capturing a multimodal situated collaborative construction task. In the task, all participants begin with different partial information, and must collaborate using speech, gesture, and action to arrive a solution that satisfies all individual pieces of private information. This rich data poses a number of annotation challenges, from small objects in a close space, to the implicit and multimodal fashion in which participants express agreement, disagreement, and beliefs. We discuss the data collection procedure, annotation schemas and tools, and future use cases.</abstract>
<identifier type="citekey">zhu-etal-2025-multimodal-common</identifier>
<location>
<url>https://aclanthology.org/2025.isa-1.9/</url>
</location>
<part>
<date>2025-09</date>
<extent unit="page">
<start>85</start>
<end>91</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Multimodal Common Ground Annotation for Partial Information Collaborative Problem Solving
%A Zhu, Yifan
%A Jung, Changsoo
%A Lai, Kenneth
%A Venkatesha, Videep
%A Bradford, Mariah
%A Fitzgerald, Jack
%A Jamil, Huma
%A Graff, Carine
%A Kumar, Sai Kiran Ganesh
%A Draper, Bruce
%A Blanchard, Nathaniel
%A Pustejovsky, James
%A Krishnaswamy, Nikhil
%Y Bunt, Harry
%S Proceedings of the 21st Joint ACL - ISO Workshop on Interoperable Semantic Annotation (ISA-21)
%D 2025
%8 September
%I Association for Computational Linguistics
%C Düsseldorf, Germany
%@ 979-8-89176-319-7
%F zhu-etal-2025-multimodal-common
%X This project note describes challenges and procedures undertaken in annotating an audiovisual dataset capturing a multimodal situated collaborative construction task. In the task, all participants begin with different partial information, and must collaborate using speech, gesture, and action to arrive a solution that satisfies all individual pieces of private information. This rich data poses a number of annotation challenges, from small objects in a close space, to the implicit and multimodal fashion in which participants express agreement, disagreement, and beliefs. We discuss the data collection procedure, annotation schemas and tools, and future use cases.
%U https://aclanthology.org/2025.isa-1.9/
%P 85-91
Markdown (Informal)
[Multimodal Common Ground Annotation for Partial Information Collaborative Problem Solving](https://aclanthology.org/2025.isa-1.9/) (Zhu et al., ISA 2025)
ACL
- Yifan Zhu, Changsoo Jung, Kenneth Lai, Videep Venkatesha, Mariah Bradford, Jack Fitzgerald, Huma Jamil, Carine Graff, Sai Kiran Ganesh Kumar, Bruce Draper, Nathaniel Blanchard, James Pustejovsky, and Nikhil Krishnaswamy. 2025. Multimodal Common Ground Annotation for Partial Information Collaborative Problem Solving. In Proceedings of the 21st Joint ACL - ISO Workshop on Interoperable Semantic Annotation (ISA-21), pages 85–91, Düsseldorf, Germany. Association for Computational Linguistics.