@inproceedings{paggio-etal-2021-towards,
title = "Towards a Methodology Supporting Semiautomatic Annotation of {H}ead {M}ovements in Video-recorded Conversations",
author = "Paggio, Patrizia and
Navarretta, Costanza and
Jongejan, Bart and
Agirrezabal, Manex",
editor = "Bonial, Claire and
Xue, Nianwen",
booktitle = "Proceedings of the Joint 15th Linguistic Annotation Workshop (LAW) and 3rd Designing Meaning Representations (DMR) Workshop",
month = nov,
year = "2021",
address = "Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.law-1.16",
doi = "10.18653/v1/2021.law-1.16",
pages = "151--159",
abstract = "We present a method to support the annotation of head movements in video-recorded conversations. Head movement segments from annotated multimodal data are used to train a model to detect head movements in unseen data. The resulting predicted movement sequences are uploaded to the ANVIL tool for post-annotation editing. The automatically identified head movements and the original annotations are compared to assess the overlap between the two. This analysis showed that movement onsets were more easily detected than offsets, and pointed at a number of patterns in the mismatches between original annotations and model predictions that could be dealt with in general terms in post-annotation guidelines.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="paggio-etal-2021-towards">
<titleInfo>
<title>Towards a Methodology Supporting Semiautomatic Annotation of Head Movements in Video-recorded Conversations</title>
</titleInfo>
<name type="personal">
<namePart type="given">Patrizia</namePart>
<namePart type="family">Paggio</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Costanza</namePart>
<namePart type="family">Navarretta</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bart</namePart>
<namePart type="family">Jongejan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Manex</namePart>
<namePart type="family">Agirrezabal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Joint 15th Linguistic Annotation Workshop (LAW) and 3rd Designing Meaning Representations (DMR) Workshop</title>
</titleInfo>
<name type="personal">
<namePart type="given">Claire</namePart>
<namePart type="family">Bonial</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nianwen</namePart>
<namePart type="family">Xue</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Punta Cana, Dominican Republic</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We present a method to support the annotation of head movements in video-recorded conversations. Head movement segments from annotated multimodal data are used to train a model to detect head movements in unseen data. The resulting predicted movement sequences are uploaded to the ANVIL tool for post-annotation editing. The automatically identified head movements and the original annotations are compared to assess the overlap between the two. This analysis showed that movement onsets were more easily detected than offsets, and pointed at a number of patterns in the mismatches between original annotations and model predictions that could be dealt with in general terms in post-annotation guidelines.</abstract>
<identifier type="citekey">paggio-etal-2021-towards</identifier>
<identifier type="doi">10.18653/v1/2021.law-1.16</identifier>
<location>
<url>https://aclanthology.org/2021.law-1.16</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>151</start>
<end>159</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Towards a Methodology Supporting Semiautomatic Annotation of Head Movements in Video-recorded Conversations
%A Paggio, Patrizia
%A Navarretta, Costanza
%A Jongejan, Bart
%A Agirrezabal, Manex
%Y Bonial, Claire
%Y Xue, Nianwen
%S Proceedings of the Joint 15th Linguistic Annotation Workshop (LAW) and 3rd Designing Meaning Representations (DMR) Workshop
%D 2021
%8 November
%I Association for Computational Linguistics
%C Punta Cana, Dominican Republic
%F paggio-etal-2021-towards
%X We present a method to support the annotation of head movements in video-recorded conversations. Head movement segments from annotated multimodal data are used to train a model to detect head movements in unseen data. The resulting predicted movement sequences are uploaded to the ANVIL tool for post-annotation editing. The automatically identified head movements and the original annotations are compared to assess the overlap between the two. This analysis showed that movement onsets were more easily detected than offsets, and pointed at a number of patterns in the mismatches between original annotations and model predictions that could be dealt with in general terms in post-annotation guidelines.
%R 10.18653/v1/2021.law-1.16
%U https://aclanthology.org/2021.law-1.16
%U https://doi.org/10.18653/v1/2021.law-1.16
%P 151-159
Markdown (Informal)
[Towards a Methodology Supporting Semiautomatic Annotation of Head Movements in Video-recorded Conversations](https://aclanthology.org/2021.law-1.16) (Paggio et al., LAW 2021)
ACL