@inproceedings{brady-du-2021-teaching,
title = "Teaching Arm and Head Gestures to a Humanoid Robot through Interactive Demonstration and Spoken Instruction",
author = "Brady, Michael and
Du, Han",
editor = "Donatelli, Lucia and
Krishnaswamy, Nikhil and
Lai, Kenneth and
Pustejovsky, James",
booktitle = "Proceedings of the 1st Workshop on Multimodal Semantic Representations (MMSR)",
month = jun,
year = "2021",
address = "Groningen, Netherlands (Online)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.mmsr-1.9/",
pages = "95--101",
abstract = "We describe work in progress for training a humanoid robot to produce iconic arm and head gestures as part of task-oriented dialogue interaction. This involves the development and use of a multimodal dialog manager for non-experts to quickly {\textquoteleft}program' the robot through speech and vision. Using this dialog manager, videos of gesture demonstrations are collected. Motor positions are extracted from these videos to specify motor trajectories where collections of motor trajectories are used to produce robot gestures following a Gaussian mixtures approach. Concluding discussion considers how learned representations may be used for gesture recognition by the robot, and how the framework may mature into a system to address language grounding and semantic representation."
}

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="brady-du-2021-teaching">
    <titleInfo>
      <title>Teaching Arm and Head Gestures to a Humanoid Robot through Interactive Demonstration and Spoken Instruction</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Michael</namePart>
      <namePart type="family">Brady</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Han</namePart>
      <namePart type="family">Du</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-06</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 1st Workshop on Multimodal Semantic Representations (MMSR)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Lucia</namePart>
        <namePart type="family">Donatelli</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Nikhil</namePart>
        <namePart type="family">Krishnaswamy</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Kenneth</namePart>
        <namePart type="family">Lai</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">James</namePart>
        <namePart type="family">Pustejovsky</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Groningen, Netherlands (Online)</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We describe work in progress for training a humanoid robot to produce iconic arm and head gestures as part of task-oriented dialogue interaction. This involves the development and use of a multimodal dialog manager for non-experts to quickly ‘program’ the robot through speech and vision. Using this dialog manager, videos of gesture demonstrations are collected. Motor positions are extracted from these videos to specify motor trajectories where collections of motor trajectories are used to produce robot gestures following a Gaussian mixtures approach. Concluding discussion considers how learned representations may be used for gesture recognition by the robot, and how the framework may mature into a system to address language grounding and semantic representation.</abstract>
    <identifier type="citekey">brady-du-2021-teaching</identifier>
    <location>
      <url>https://aclanthology.org/2021.mmsr-1.9/</url>
    </location>
    <part>
      <date>2021-06</date>
      <extent unit="page">
        <start>95</start>
        <end>101</end>
      </extent>
    </part>
  </mods>
</modsCollection>

%0 Conference Proceedings
%T Teaching Arm and Head Gestures to a Humanoid Robot through Interactive Demonstration and Spoken Instruction
%A Brady, Michael
%A Du, Han
%Y Donatelli, Lucia
%Y Krishnaswamy, Nikhil
%Y Lai, Kenneth
%Y Pustejovsky, James
%S Proceedings of the 1st Workshop on Multimodal Semantic Representations (MMSR)
%D 2021
%8 June
%I Association for Computational Linguistics
%C Groningen, Netherlands (Online)
%F brady-du-2021-teaching
%X We describe work in progress for training a humanoid robot to produce iconic arm and head gestures as part of task-oriented dialogue interaction. This involves the development and use of a multimodal dialog manager for non-experts to quickly ‘program’ the robot through speech and vision. Using this dialog manager, videos of gesture demonstrations are collected. Motor positions are extracted from these videos to specify motor trajectories where collections of motor trajectories are used to produce robot gestures following a Gaussian mixtures approach. Concluding discussion considers how learned representations may be used for gesture recognition by the robot, and how the framework may mature into a system to address language grounding and semantic representation.
%U https://aclanthology.org/2021.mmsr-1.9/
%P 95-101

Markdown (Informal)
[Teaching Arm and Head Gestures to a Humanoid Robot through Interactive Demonstration and Spoken Instruction](https://aclanthology.org/2021.mmsr-1.9/) (Brady & Du, MMSR 2021)

ACL
Michael Brady and Han Du. 2021. Teaching Arm and Head Gestures to a Humanoid Robot through Interactive Demonstration and Spoken Instruction. In Proceedings of the 1st Workshop on Multimodal Semantic Representations (MMSR), pages 95–101, Groningen, Netherlands (Online). Association for Computational Linguistics.