@inproceedings{kleingarn-etal-2019-speaker,
    title = "Speaker-adapted neural-network-based fusion for multimodal reference resolution",
    author = "Kleingarn, Diana and
      Nabizadeh, Nima and
      Heckmann, Martin and
      Kolossa, Dorothea",
    editor = "Nakamura, Satoshi and
      Gasic, Milica and
      Zukerman, Ingrid and
      Skantze, Gabriel and
      Nakano, Mikio and
      Papangelis, Alexandros and
      Ultes, Stefan and
      Yoshino, Koichiro",
    booktitle = "Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue",
    month = sep,
    year = "2019",
    address = "Stockholm, Sweden",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W19-5925",
    doi = "10.18653/v1/W19-5925",
    pages = "210--214",
    abstract = "Humans use a variety of approaches to reference objects in the external world, including verbal descriptions, hand and head gestures, eye gaze or any combination of them. The amount of useful information from each modality, however, may vary depending on the specific person and on several other factors. For this reason, it is important to learn the correct combination of inputs for inferring the best-fitting reference. In this paper, we investigate appropriate speaker-dependent and independent fusion strategies in a multimodal reference resolution task. We show that without any change in the modality models, only through an optimized fusion technique, it is possible to reduce the error rate of the system on a reference resolution task by more than 50{\%}.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="kleingarn-etal-2019-speaker">
    <titleInfo>
      <title>Speaker-adapted neural-network-based fusion for multimodal reference resolution</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Diana</namePart>
      <namePart type="family">Kleingarn</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Nima</namePart>
      <namePart type="family">Nabizadeh</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Martin</namePart>
      <namePart type="family">Heckmann</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Dorothea</namePart>
      <namePart type="family">Kolossa</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2019-09</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Satoshi</namePart>
        <namePart type="family">Nakamura</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Milica</namePart>
        <namePart type="family">Gasic</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Ingrid</namePart>
        <namePart type="family">Zukerman</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Gabriel</namePart>
        <namePart type="family">Skantze</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Mikio</namePart>
        <namePart type="family">Nakano</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Alexandros</namePart>
        <namePart type="family">Papangelis</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Stefan</namePart>
        <namePart type="family">Ultes</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Koichiro</namePart>
        <namePart type="family">Yoshino</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Stockholm, Sweden</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Humans use a variety of approaches to reference objects in the external world, including verbal descriptions, hand and head gestures, eye gaze or any combination of them. The amount of useful information from each modality, however, may vary depending on the specific person and on several other factors. For this reason, it is important to learn the correct combination of inputs for inferring the best-fitting reference. In this paper, we investigate appropriate speaker-dependent and independent fusion strategies in a multimodal reference resolution task. We show that without any change in the modality models, only through an optimized fusion technique, it is possible to reduce the error rate of the system on a reference resolution task by more than 50%.</abstract>
    <identifier type="citekey">kleingarn-etal-2019-speaker</identifier>
    <identifier type="doi">10.18653/v1/W19-5925</identifier>
    <location>
      <url>https://aclanthology.org/W19-5925</url>
    </location>
    <part>
      <date>2019-09</date>
      <extent unit="page">
        <start>210</start>
        <end>214</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Speaker-adapted neural-network-based fusion for multimodal reference resolution
%A Kleingarn, Diana
%A Nabizadeh, Nima
%A Heckmann, Martin
%A Kolossa, Dorothea
%Y Nakamura, Satoshi
%Y Gasic, Milica
%Y Zukerman, Ingrid
%Y Skantze, Gabriel
%Y Nakano, Mikio
%Y Papangelis, Alexandros
%Y Ultes, Stefan
%Y Yoshino, Koichiro
%S Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue
%D 2019
%8 September
%I Association for Computational Linguistics
%C Stockholm, Sweden
%F kleingarn-etal-2019-speaker
%X Humans use a variety of approaches to reference objects in the external world, including verbal descriptions, hand and head gestures, eye gaze or any combination of them. The amount of useful information from each modality, however, may vary depending on the specific person and on several other factors. For this reason, it is important to learn the correct combination of inputs for inferring the best-fitting reference. In this paper, we investigate appropriate speaker-dependent and independent fusion strategies in a multimodal reference resolution task. We show that without any change in the modality models, only through an optimized fusion technique, it is possible to reduce the error rate of the system on a reference resolution task by more than 50%.
%R 10.18653/v1/W19-5925
%U https://aclanthology.org/W19-5925
%U https://doi.org/10.18653/v1/W19-5925
%P 210-214
Markdown (Informal)
[Speaker-adapted neural-network-based fusion for multimodal reference resolution](https://aclanthology.org/W19-5925) (Kleingarn et al., SIGDIAL 2019)
ACL
Diana Kleingarn, Nima Nabizadeh, Martin Heckmann, and Dorothea Kolossa. 2019. Speaker-adapted neural-network-based fusion for multimodal reference resolution. In Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue, pages 210–214, Stockholm, Sweden. Association for Computational Linguistics.