@inproceedings{nukrai-etal-2022-text,
title = "Text-Only Training for Image Captioning using Noise-Injected {CLIP}",
author = "Nukrai, David and
Mokady, Ron and
Globerson, Amir",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.findings-emnlp.299/",
doi = "10.18653/v1/2022.findings-emnlp.299",
pages = "4055--4063",
abstract = "We consider the task of image-captioning using only the CLIP model and additional text data at training time and no additional captioned images. Our approach relies on the fact that CLIP is trained to make visual and textual embeddings similar. Therefore, we only need to learn how to translate CLIP textual embeddings back into text, and we can learn how to do this by learning a decoder for the frozen CLIP text encoder using only text. We argue that this intuition is {\textquotedblleft}almost correct{\textquotedblright} because of a gap between the embedding spaces, and propose to rectify this via noise injection during training. We demonstrate the effectiveness of our approach by showing SOTA zero-shot image captioning across four benchmarks, including style transfer. Code, data, and models are available at https://github.com/DavidHuji/CapDec."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="nukrai-etal-2022-text">
<titleInfo>
<title>Text-Only Training for Image Captioning using Noise-Injected CLIP</title>
</titleInfo>
<name type="personal">
<namePart type="given">David</namePart>
<namePart type="family">Nukrai</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ron</namePart>
<namePart type="family">Mokady</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Amir</namePart>
<namePart type="family">Globerson</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2022</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yoav</namePart>
<namePart type="family">Goldberg</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zornitsa</namePart>
<namePart type="family">Kozareva</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yue</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, United Arab Emirates</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We consider the task of image-captioning using only the CLIP model and additional text data at training time and no additional captioned images. Our approach relies on the fact that CLIP is trained to make visual and textual embeddings similar. Therefore, we only need to learn how to translate CLIP textual embeddings back into text, and we can learn how to do this by learning a decoder for the frozen CLIP text encoder using only text. We argue that this intuition is “almost correct” because of a gap between the embedding spaces, and propose to rectify this via noise injection during training. We demonstrate the effectiveness of our approach by showing SOTA zero-shot image captioning across four benchmarks, including style transfer. Code, data, and models are available at https://github.com/DavidHuji/CapDec.</abstract>
<identifier type="citekey">nukrai-etal-2022-text</identifier>
<identifier type="doi">10.18653/v1/2022.findings-emnlp.299</identifier>
<location>
<url>https://aclanthology.org/2022.findings-emnlp.299/</url>
</location>
<part>
<date>2022-12</date>
<extent unit="page">
<start>4055</start>
<end>4063</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Text-Only Training for Image Captioning using Noise-Injected CLIP
%A Nukrai, David
%A Mokady, Ron
%A Globerson, Amir
%Y Goldberg, Yoav
%Y Kozareva, Zornitsa
%Y Zhang, Yue
%S Findings of the Association for Computational Linguistics: EMNLP 2022
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates
%F nukrai-etal-2022-text
%X We consider the task of image-captioning using only the CLIP model and additional text data at training time and no additional captioned images. Our approach relies on the fact that CLIP is trained to make visual and textual embeddings similar. Therefore, we only need to learn how to translate CLIP textual embeddings back into text, and we can learn how to do this by learning a decoder for the frozen CLIP text encoder using only text. We argue that this intuition is “almost correct” because of a gap between the embedding spaces, and propose to rectify this via noise injection during training. We demonstrate the effectiveness of our approach by showing SOTA zero-shot image captioning across four benchmarks, including style transfer. Code, data, and models are available at https://github.com/DavidHuji/CapDec.
%R 10.18653/v1/2022.findings-emnlp.299
%U https://aclanthology.org/2022.findings-emnlp.299/
%U https://doi.org/10.18653/v1/2022.findings-emnlp.299
%P 4055-4063
Markdown (Informal)
[Text-Only Training for Image Captioning using Noise-Injected CLIP](https://aclanthology.org/2022.findings-emnlp.299/) (Nukrai et al., Findings 2022)
ACL
David Nukrai, Ron Mokady, and Amir Globerson. 2022. [Text-Only Training for Image Captioning using Noise-Injected CLIP](https://aclanthology.org/2022.findings-emnlp.299/). In *Findings of the Association for Computational Linguistics: EMNLP 2022*, pages 4055–4063, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.
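
The abstract above describes the paper's core idea: train a caption decoder on frozen CLIP *text* embeddings only, injecting noise during training so the decoder also tolerates CLIP *image* embeddings at inference time. Below is a minimal PyTorch-style sketch of that noise-injection step, not the authors' implementation (which is at the GitHub link above); the function name, the `clip_text_encoder` callable, and the default noise scale are illustrative assumptions.

```python
import torch

def noisy_clip_text_embedding(clip_text_encoder, token_ids, noise_std=0.1):
    """Encode a caption with a frozen CLIP text encoder and inject Gaussian noise.

    The noise is meant to cover the gap between CLIP's text and image embedding
    spaces, so a decoder trained on these vectors can later accept image
    embeddings. `noise_std` is an illustrative default; the appropriate
    magnitude is tuned in the paper.
    """
    with torch.no_grad():                            # CLIP stays frozen
        emb = clip_text_encoder(token_ids)           # (batch, dim) text embedding
    emb = emb / emb.norm(dim=-1, keepdim=True)       # CLIP embeddings live on the unit sphere
    return emb + noise_std * torch.randn_like(emb)   # noise-injected input for the decoder
```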