@inproceedings{tanti-etal-2017-role,
title = "What is the Role of Recurrent Neural Networks ({RNN}s) in an Image Caption Generator?",
author = "Tanti, Marc and
Gatt, Albert and
Camilleri, Kenneth",
editor = "Alonso, Jose M. and
Bugar{\'\i}n, Alberto and
Reiter, Ehud",
booktitle = "Proceedings of the 10th International Conference on Natural Language Generation",
month = sep,
year = "2017",
address = "Santiago de Compostela, Spain",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-3506",
doi = "10.18653/v1/W17-3506",
pages = "51--60",
abstract = "Image captioning has evolved into a core task for Natural Language Generation and has also proved to be an important testbed for deep learning approaches to handling multimodal representations. Most contemporary approaches rely on a combination of a convolutional network to handle image features, and a recurrent network to encode linguistic information. The latter is typically viewed as the primary {``}generation{''} component. Beyond this high-level characterisation, a CNN+RNN model supports a variety of architectural designs. The dominant model in the literature is one in which visual features encoded by a CNN are {``}injected{''} as part of the linguistic encoding process, driving the RNN{'}s linguistic choices. By contrast, it is possible to envisage an architecture in which visual and linguistic features are encoded separately, and merged at a subsequent stage. In this paper, we address two related questions: (1) Is direct injection the best way of combining multimodal information, or is a late merging alternative better for the image captioning task? (2) To what extent should a recurrent network be viewed as actually generating, rather than simply encoding, linguistic information?",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="tanti-etal-2017-role">
<titleInfo>
<title>What is the Role of Recurrent Neural Networks (RNNs) in an Image Caption Generator?</title>
</titleInfo>
<name type="personal">
<namePart type="given">Marc</namePart>
<namePart type="family">Tanti</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Albert</namePart>
<namePart type="family">Gatt</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kenneth</namePart>
<namePart type="family">Camilleri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2017-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 10th International Conference on Natural Language Generation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jose</namePart>
<namePart type="given">M</namePart>
<namePart type="family">Alonso</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alberto</namePart>
<namePart type="family">Bugarín</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ehud</namePart>
<namePart type="family">Reiter</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Santiago de Compostela, Spain</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Image captioning has evolved into a core task for Natural Language Generation and has also proved to be an important testbed for deep learning approaches to handling multimodal representations. Most contemporary approaches rely on a combination of a convolutional network to handle image features, and a recurrent network to encode linguistic information. The latter is typically viewed as the primary “generation” component. Beyond this high-level characterisation, a CNN+RNN model supports a variety of architectural designs. The dominant model in the literature is one in which visual features encoded by a CNN are “injected” as part of the linguistic encoding process, driving the RNN’s linguistic choices. By contrast, it is possible to envisage an architecture in which visual and linguistic features are encoded separately, and merged at a subsequent stage. In this paper, we address two related questions: (1) Is direct injection the best way of combining multimodal information, or is a late merging alternative better for the image captioning task? (2) To what extent should a recurrent network be viewed as actually generating, rather than simply encoding, linguistic information?</abstract>
<identifier type="citekey">tanti-etal-2017-role</identifier>
<identifier type="doi">10.18653/v1/W17-3506</identifier>
<location>
<url>https://aclanthology.org/W17-3506</url>
</location>
<part>
<date>2017-09</date>
<extent unit="page">
<start>51</start>
<end>60</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T What is the Role of Recurrent Neural Networks (RNNs) in an Image Caption Generator?
%A Tanti, Marc
%A Gatt, Albert
%A Camilleri, Kenneth
%Y Alonso, Jose M.
%Y Bugarín, Alberto
%Y Reiter, Ehud
%S Proceedings of the 10th International Conference on Natural Language Generation
%D 2017
%8 September
%I Association for Computational Linguistics
%C Santiago de Compostela, Spain
%F tanti-etal-2017-role
%X Image captioning has evolved into a core task for Natural Language Generation and has also proved to be an important testbed for deep learning approaches to handling multimodal representations. Most contemporary approaches rely on a combination of a convolutional network to handle image features, and a recurrent network to encode linguistic information. The latter is typically viewed as the primary “generation” component. Beyond this high-level characterisation, a CNN+RNN model supports a variety of architectural designs. The dominant model in the literature is one in which visual features encoded by a CNN are “injected” as part of the linguistic encoding process, driving the RNN’s linguistic choices. By contrast, it is possible to envisage an architecture in which visual and linguistic features are encoded separately, and merged at a subsequent stage. In this paper, we address two related questions: (1) Is direct injection the best way of combining multimodal information, or is a late merging alternative better for the image captioning task? (2) To what extent should a recurrent network be viewed as actually generating, rather than simply encoding, linguistic information?
%R 10.18653/v1/W17-3506
%U https://aclanthology.org/W17-3506
%U https://doi.org/10.18653/v1/W17-3506
%P 51-60
Markdown (Informal)
[What is the Role of Recurrent Neural Networks (RNNs) in an Image Caption Generator?](https://aclanthology.org/W17-3506) (Tanti et al., INLG 2017)
ACL
Marc Tanti, Albert Gatt, and Kenneth Camilleri. 2017. [What is the Role of Recurrent Neural Networks (RNNs) in an Image Caption Generator?](https://aclanthology.org/W17-3506). In *Proceedings of the 10th International Conference on Natural Language Generation*, pages 51–60, Santiago de Compostela, Spain. Association for Computational Linguistics.
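
As a companion to the abstract above, here is a minimal sketch contrasting the two CNN+RNN designs the paper compares: an "inject" model, where CNN image features enter the RNN's own encoding process, and a "merge" model, where the RNN encodes the caption prefix alone and the two modalities are combined only afterwards. This is an illustrative reconstruction, not the authors' released code; the layer sizes (`vocab_size`, `max_len`, `feat_dim`, `hidden`) and the helper names `inject_model`/`merge_model` are assumptions, and the inject variant shown (image features concatenated to every word embedding) is only one of several possible injection points.

```python
# Illustrative sketch of the "inject" vs. "merge" caption-generator
# architectures discussed in the abstract, using tensorflow.keras.
# All hyperparameters are placeholder assumptions, not paper values.
from tensorflow.keras import layers, Model

vocab_size, max_len, feat_dim, hidden = 10_000, 20, 4096, 256

def inject_model():
    """Inject: image features participate in the RNN's encoding itself."""
    img = layers.Input(shape=(feat_dim,), name="cnn_features")
    words = layers.Input(shape=(max_len,), name="caption_prefix")
    e = layers.Embedding(vocab_size, hidden)(words)               # word embeddings
    v = layers.RepeatVector(max_len)(layers.Dense(hidden)(img))   # image at every timestep
    h = layers.LSTM(hidden)(layers.Concatenate()([e, v]))         # RNN sees both modalities
    out = layers.Dense(vocab_size, activation="softmax")(h)       # next-word distribution
    return Model([img, words], out)

def merge_model():
    """Merge: the RNN encodes language only; modalities meet after the RNN."""
    img = layers.Input(shape=(feat_dim,), name="cnn_features")
    words = layers.Input(shape=(max_len,), name="caption_prefix")
    h = layers.LSTM(hidden)(layers.Embedding(vocab_size, hidden)(words))
    v = layers.Dense(hidden)(img)                                 # image encoded separately
    merged = layers.Concatenate()([h, v])                         # late fusion
    out = layers.Dense(vocab_size, activation="softmax")(merged)
    return Model([img, words], out)
```

Note that in the merge design the RNN never sees the image at all, which is what makes the paper's second question testable: if such a model still captions well, the RNN is plausibly encoding the linguistic prefix rather than "generating" from the visual input.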