@inproceedings{zarriess-etal-2017-refer,
title = "Refer-i{TTS}: A System for Referring in Spoken Installments to Objects in Real-World Images",
author = "Zarrie{\ss}, Sina and
L{\'o}pez Gambino, M. Soledad and
Schlangen, David",
editor = "Alonso, Jose M. and
Bugar{\'\i}n, Alberto and
Reiter, Ehud",
booktitle = "Proceedings of the 10th International Conference on Natural Language Generation",
month = sep,
year = "2017",
address = "Santiago de Compostela, Spain",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-3509",
doi = "10.18653/v1/W17-3509",
pages = "72--73",
abstract = "Current referring expression generation systems mostly deliver their output as one-shot, written expressions. We present on-going work on incremental generation of spoken expressions referring to objects in real-world images. This approach extends upon previous work using the words-as-classifier model for generation. We implement this generator in an incremental dialogue processing framework such that we can exploit an existing interface to incremental text-to-speech synthesis. Our system generates and synthesizes referring expressions while continuously observing non-verbal user reactions.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="zarriess-etal-2017-refer">
    <titleInfo>
      <title>Refer-iTTS: A System for Referring in Spoken Installments to Objects in Real-World Images</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Sina</namePart>
      <namePart type="family">Zarrieß</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">M</namePart>
      <namePart type="given">Soledad</namePart>
      <namePart type="family">López Gambino</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">David</namePart>
      <namePart type="family">Schlangen</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2017-09</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 10th International Conference on Natural Language Generation</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Jose</namePart>
        <namePart type="given">M</namePart>
        <namePart type="family">Alonso</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Alberto</namePart>
        <namePart type="family">Bugarín</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Ehud</namePart>
        <namePart type="family">Reiter</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Santiago de Compostela, Spain</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Current referring expression generation systems mostly deliver their output as one-shot, written expressions. We present on-going work on incremental generation of spoken expressions referring to objects in real-world images. This approach extends upon previous work using the words-as-classifier model for generation. We implement this generator in an incremental dialogue processing framework such that we can exploit an existing interface to incremental text-to-speech synthesis. Our system generates and synthesizes referring expressions while continuously observing non-verbal user reactions.</abstract>
    <identifier type="citekey">zarriess-etal-2017-refer</identifier>
    <identifier type="doi">10.18653/v1/W17-3509</identifier>
    <location>
      <url>https://aclanthology.org/W17-3509</url>
    </location>
    <part>
      <date>2017-09</date>
      <extent unit="page">
        <start>72</start>
        <end>73</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Refer-iTTS: A System for Referring in Spoken Installments to Objects in Real-World Images
%A Zarrieß, Sina
%A López Gambino, M. Soledad
%A Schlangen, David
%Y Alonso, Jose M.
%Y Bugarín, Alberto
%Y Reiter, Ehud
%S Proceedings of the 10th International Conference on Natural Language Generation
%D 2017
%8 September
%I Association for Computational Linguistics
%C Santiago de Compostela, Spain
%F zarriess-etal-2017-refer
%X Current referring expression generation systems mostly deliver their output as one-shot, written expressions. We present on-going work on incremental generation of spoken expressions referring to objects in real-world images. This approach extends upon previous work using the words-as-classifier model for generation. We implement this generator in an incremental dialogue processing framework such that we can exploit an existing interface to incremental text-to-speech synthesis. Our system generates and synthesizes referring expressions while continuously observing non-verbal user reactions.
%R 10.18653/v1/W17-3509
%U https://aclanthology.org/W17-3509
%U https://doi.org/10.18653/v1/W17-3509
%P 72-73
Markdown (Informal)
[Refer-iTTS: A System for Referring in Spoken Installments to Objects in Real-World Images](https://aclanthology.org/W17-3509) (Zarrieß et al., INLG 2017)
ACL
Sina Zarrieß, M. Soledad López Gambino, and David Schlangen. 2017. [Refer-iTTS: A System for Referring in Spoken Installments to Objects in Real-World Images](https://aclanthology.org/W17-3509). In *Proceedings of the 10th International Conference on Natural Language Generation*, pages 72–73, Santiago de Compostela, Spain. Association for Computational Linguistics.