BibTeX
@inproceedings{petukhova-bunt-2010-towards,
title = "Towards an Integrated Scheme for Semantic Annotation of Multimodal Dialogue Data",
author = "Petukhova, Volha and
Bunt, Harry",
editor = "Calzolari, Nicoletta and
Choukri, Khalid and
Maegaard, Bente and
Mariani, Joseph and
Odijk, Jan and
Piperidis, Stelios and
Rosner, Mike and
Tapias, Daniel",
booktitle = "Proceedings of the Seventh International Conference on Language Resources and Evaluation ({LREC}'10)",
month = may,
year = "2010",
address = "Valletta, Malta",
publisher = "European Language Resources Association (ELRA)",
url = "http://www.lrec-conf.org/proceedings/lrec2010/pdf/195_Paper.pdf",
abstract = "Recent years witness a growing interest in the use of multimodal data for modelling of communicative behaviour in dialogue. Dybkjaer and Bernsen (2002), point out that coding schemes for multimodal data are used solely by their creators. Standardisation has been achieved to some extent for coding behavioural features for certain nonverbal expressions, e.g. for facial expression, however, for the semantic annotation of such expressions combined with other modalities such as speech there is still a long way to go. The majority of existing dialogue act annotation schemes that are designed to code semantic and pragmatic dialogue information are limited to analysis of spoken modality. This paper investigates the applicability of existing dialogue act annotation schemes to the semantic annotation of multimodal data, and the way a dialogue act annotation scheme can be extended to cover dialogue phenomena from multiple modalities. The general conclusion of our explorative study is that a multidimensional dialogue act taxonomy is usable for this purpose when some adjustments are made. We proposed a solution for adding these aspects to a dialogue act annotation scheme without changing its set of communicative functions, in the form of qualifiers that can be attached to communicative function tags.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="petukhova-bunt-2010-towards">
    <titleInfo>
      <title>Towards an Integrated Scheme for Semantic Annotation of Multimodal Dialogue Data</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Volha</namePart>
      <namePart type="family">Petukhova</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Harry</namePart>
      <namePart type="family">Bunt</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2010-05</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Seventh International Conference on Language Resources and Evaluation (LREC’10)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Nicoletta</namePart>
        <namePart type="family">Calzolari</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Khalid</namePart>
        <namePart type="family">Choukri</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Bente</namePart>
        <namePart type="family">Maegaard</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Joseph</namePart>
        <namePart type="family">Mariani</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Jan</namePart>
        <namePart type="family">Odijk</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Stelios</namePart>
        <namePart type="family">Piperidis</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Mike</namePart>
        <namePart type="family">Rosner</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Daniel</namePart>
        <namePart type="family">Tapias</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>European Language Resources Association (ELRA)</publisher>
        <place>
          <placeTerm type="text">Valletta, Malta</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Recent years have witnessed a growing interest in the use of multimodal data for modelling communicative behaviour in dialogue. Dybkjaer and Bernsen (2002) point out that coding schemes for multimodal data are used solely by their creators. Standardisation has been achieved to some extent for coding the behavioural features of certain nonverbal expressions, e.g. facial expressions; however, for the semantic annotation of such expressions in combination with other modalities such as speech, there is still a long way to go. The majority of existing dialogue act annotation schemes designed to code semantic and pragmatic dialogue information are limited to the analysis of the spoken modality. This paper investigates the applicability of existing dialogue act annotation schemes to the semantic annotation of multimodal data, and the way in which a dialogue act annotation scheme can be extended to cover dialogue phenomena from multiple modalities. The general conclusion of our exploratory study is that a multidimensional dialogue act taxonomy is usable for this purpose when some adjustments are made. We propose a solution for adding these aspects to a dialogue act annotation scheme without changing its set of communicative functions, in the form of qualifiers that can be attached to communicative function tags.</abstract>
<identifier type="citekey">petukhova-bunt-2010-towards</identifier>
<location>
<url>http://www.lrec-conf.org/proceedings/lrec2010/pdf/195_Paper.pdf</url>
</location>
<part>
<date>2010-05</date>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Towards an Integrated Scheme for Semantic Annotation of Multimodal Dialogue Data
%A Petukhova, Volha
%A Bunt, Harry
%Y Calzolari, Nicoletta
%Y Choukri, Khalid
%Y Maegaard, Bente
%Y Mariani, Joseph
%Y Odijk, Jan
%Y Piperidis, Stelios
%Y Rosner, Mike
%Y Tapias, Daniel
%S Proceedings of the Seventh International Conference on Language Resources and Evaluation (LREC’10)
%D 2010
%8 May
%I European Language Resources Association (ELRA)
%C Valletta, Malta
%F petukhova-bunt-2010-towards
%X Recent years have witnessed a growing interest in the use of multimodal data for modelling communicative behaviour in dialogue. Dybkjaer and Bernsen (2002) point out that coding schemes for multimodal data are used solely by their creators. Standardisation has been achieved to some extent for coding the behavioural features of certain nonverbal expressions, e.g. facial expressions; however, for the semantic annotation of such expressions in combination with other modalities such as speech, there is still a long way to go. The majority of existing dialogue act annotation schemes designed to code semantic and pragmatic dialogue information are limited to the analysis of the spoken modality. This paper investigates the applicability of existing dialogue act annotation schemes to the semantic annotation of multimodal data, and the way in which a dialogue act annotation scheme can be extended to cover dialogue phenomena from multiple modalities. The general conclusion of our exploratory study is that a multidimensional dialogue act taxonomy is usable for this purpose when some adjustments are made. We propose a solution for adding these aspects to a dialogue act annotation scheme without changing its set of communicative functions, in the form of qualifiers that can be attached to communicative function tags.
%U http://www.lrec-conf.org/proceedings/lrec2010/pdf/195_Paper.pdf
Markdown (Informal)
[Towards an Integrated Scheme for Semantic Annotation of Multimodal Dialogue Data](http://www.lrec-conf.org/proceedings/lrec2010/pdf/195_Paper.pdf) (Petukhova & Bunt, LREC 2010)
ACL
Volha Petukhova and Harry Bunt. 2010. Towards an Integrated Scheme for Semantic Annotation of Multimodal Dialogue Data. In Proceedings of the Seventh International Conference on Language Resources and Evaluation (LREC’10), Valletta, Malta. European Language Resources Association (ELRA).
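
As a rough illustration of the qualifier mechanism described in the abstract, the sketch below attaches qualifiers to a communicative function tag without altering the function inventory itself. It is a hypothetical example in the spirit of DiAML-style dialogue act markup; the element and attribute names are assumptions for illustration, not the paper's actual tag set.

<!-- Hypothetical annotation sketch: the communicative function "inform"
     is kept unchanged; the certainty and sentiment qualifiers record what
     the nonverbal channels (gaze, facial expression) add to the utterance. -->
<dialogueAct xml:id="da1" sender="#p1" addressee="#p2"
             dimension="task" communicativeFunction="inform"
             certainty="uncertain" sentiment="surprise"
             modality="speech gaze"/>

Here the set of communicative functions stays fixed, exactly as the abstract proposes: the multimodal information rides along as qualifier attributes attached to the function tag.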