@inproceedings{zarriess-schlangen-2018-data,
  author    = {Zarrie{\ss}, Sina and
               Schlangen, David},
  title     = {Being data-driven is not enough: Revisiting interactive instruction giving as a challenge for {NLG}},
  editor    = {Foster, Mary Ellen and
               Buschmeier, Hendrik and
               Gkatzia, Dimitra},
  booktitle = {Proceedings of the Workshop on {NLG} for Human{--}Robot Interaction},
  month     = nov,
  year      = {2018},
  address   = {Tilburg, The Netherlands},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/W18-6906},
  doi       = {10.18653/v1/W18-6906},
  pages     = {27--31},
  abstract  = {Modeling traditional NLG tasks with data-driven techniques has been a major focus of research in NLG in the past decade. We argue that existing modeling techniques are mostly tailored to textual data and are not sufficient to make NLG technology meet the requirements of agents which target fluid interaction and collaboration in the real world. We revisit interactive instruction giving as a challenge for datadriven NLG and, based on insights from previous GIVE challenges, propose that instruction giving should be addressed in a setting that involves visual grounding and spoken language. These basic design decisions will require NLG frameworks that are capable of monitoring their environment as well as timing and revising their verbal output. We believe that these are core capabilities for making NLG technology transferrable to interactive systems.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zarriess-schlangen-2018-data">
<titleInfo>
<title>Being data-driven is not enough: Revisiting interactive instruction giving as a challenge for NLG</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sina</namePart>
<namePart type="family">Zarrieß</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">David</namePart>
<namePart type="family">Schlangen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Workshop on NLG for Human–Robot Interaction</title>
</titleInfo>
<name type="personal">
<namePart type="given">Mary</namePart>
<namePart type="given">Ellen</namePart>
<namePart type="family">Foster</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hendrik</namePart>
<namePart type="family">Buschmeier</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dimitra</namePart>
<namePart type="family">Gkatzia</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Tilburg, The Netherlands</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Modeling traditional NLG tasks with data-driven techniques has been a major focus of research in NLG in the past decade. We argue that existing modeling techniques are mostly tailored to textual data and are not sufficient to make NLG technology meet the requirements of agents which target fluid interaction and collaboration in the real world. We revisit interactive instruction giving as a challenge for datadriven NLG and, based on insights from previous GIVE challenges, propose that instruction giving should be addressed in a setting that involves visual grounding and spoken language. These basic design decisions will require NLG frameworks that are capable of monitoring their environment as well as timing and revising their verbal output. We believe that these are core capabilities for making NLG technology transferrable to interactive systems.</abstract>
<identifier type="citekey">zarriess-schlangen-2018-data</identifier>
<identifier type="doi">10.18653/v1/W18-6906</identifier>
<location>
<url>https://aclanthology.org/W18-6906</url>
</location>
<part>
<date>2018-11</date>
<extent unit="page">
<start>27</start>
<end>31</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Being data-driven is not enough: Revisiting interactive instruction giving as a challenge for NLG
%A Zarrieß, Sina
%A Schlangen, David
%Y Foster, Mary Ellen
%Y Buschmeier, Hendrik
%Y Gkatzia, Dimitra
%S Proceedings of the Workshop on NLG for Human–Robot Interaction
%D 2018
%8 November
%I Association for Computational Linguistics
%C Tilburg, The Netherlands
%F zarriess-schlangen-2018-data
%X Modeling traditional NLG tasks with data-driven techniques has been a major focus of research in NLG in the past decade. We argue that existing modeling techniques are mostly tailored to textual data and are not sufficient to make NLG technology meet the requirements of agents which target fluid interaction and collaboration in the real world. We revisit interactive instruction giving as a challenge for datadriven NLG and, based on insights from previous GIVE challenges, propose that instruction giving should be addressed in a setting that involves visual grounding and spoken language. These basic design decisions will require NLG frameworks that are capable of monitoring their environment as well as timing and revising their verbal output. We believe that these are core capabilities for making NLG technology transferrable to interactive systems.
%R 10.18653/v1/W18-6906
%U https://aclanthology.org/W18-6906
%U https://doi.org/10.18653/v1/W18-6906
%P 27-31
Markdown (Informal)
[Being data-driven is not enough: Revisiting interactive instruction giving as a challenge for NLG](https://aclanthology.org/W18-6906) (Zarrieß & Schlangen, INLG 2018)
ACL