@inproceedings{devillers-etal-2021-language,
    title = "Does language help generalization in vision models?",
    author = "Devillers, Benjamin and
      Choksi, Bhavin and
      Bielawski, Romain and
      VanRullen, Rufin",
    editor = "Bisazza, Arianna and
      Abend, Omri",
    booktitle = "Proceedings of the 25th Conference on Computational Natural Language Learning",
    month = nov,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.conll-1.13/",
    doi = "10.18653/v1/2021.conll-1.13",
    pages = "171--182",
    abstract = "Vision models trained on multimodal datasets can benefit from the wide availability of large image-caption datasets. A recent model (CLIP) was found to generalize well in zero-shot and transfer learning settings. This could imply that linguistic or {\textquotedblleft}semantic grounding{\textquotedblright} confers additional generalization abilities to the visual feature space. Here, we systematically evaluate various multimodal architectures and vision-only models in terms of unsupervised clustering, few-shot learning, transfer learning and adversarial robustness. In each setting, multimodal training produced no additional generalization capability compared to standard supervised visual training. We conclude that work is still required for semantic grounding to help improve vision models."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="devillers-etal-2021-language">
<titleInfo>
<title>Does language help generalization in vision models?</title>
</titleInfo>
<name type="personal">
<namePart type="given">Benjamin</namePart>
<namePart type="family">Devillers</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bhavin</namePart>
<namePart type="family">Choksi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Romain</namePart>
<namePart type="family">Bielawski</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rufin</namePart>
<namePart type="family">VanRullen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 25th Conference on Computational Natural Language Learning</title>
</titleInfo>
<name type="personal">
<namePart type="given">Arianna</namePart>
<namePart type="family">Bisazza</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Omri</namePart>
<namePart type="family">Abend</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Vision models trained on multimodal datasets can benefit from the wide availability of large image-caption datasets. A recent model (CLIP) was found to generalize well in zero-shot and transfer learning settings. This could imply that linguistic or “semantic grounding” confers additional generalization abilities to the visual feature space. Here, we systematically evaluate various multimodal architectures and vision-only models in terms of unsupervised clustering, few-shot learning, transfer learning and adversarial robustness. In each setting, multimodal training produced no additional generalization capability compared to standard supervised visual training. We conclude that work is still required for semantic grounding to help improve vision models.</abstract>
<identifier type="citekey">devillers-etal-2021-language</identifier>
<identifier type="doi">10.18653/v1/2021.conll-1.13</identifier>
<location>
<url>https://aclanthology.org/2021.conll-1.13/</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>171</start>
<end>182</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Does language help generalization in vision models?
%A Devillers, Benjamin
%A Choksi, Bhavin
%A Bielawski, Romain
%A VanRullen, Rufin
%Y Bisazza, Arianna
%Y Abend, Omri
%S Proceedings of the 25th Conference on Computational Natural Language Learning
%D 2021
%8 November
%I Association for Computational Linguistics
%C Online
%F devillers-etal-2021-language
%X Vision models trained on multimodal datasets can benefit from the wide availability of large image-caption datasets. A recent model (CLIP) was found to generalize well in zero-shot and transfer learning settings. This could imply that linguistic or “semantic grounding” confers additional generalization abilities to the visual feature space. Here, we systematically evaluate various multimodal architectures and vision-only models in terms of unsupervised clustering, few-shot learning, transfer learning and adversarial robustness. In each setting, multimodal training produced no additional generalization capability compared to standard supervised visual training. We conclude that work is still required for semantic grounding to help improve vision models.
%R 10.18653/v1/2021.conll-1.13
%U https://aclanthology.org/2021.conll-1.13/
%U https://doi.org/10.18653/v1/2021.conll-1.13
%P 171-182
Markdown (Informal)
[Does language help generalization in vision models?](https://aclanthology.org/2021.conll-1.13/) (Devillers et al., CoNLL 2021)
ACL
Benjamin Devillers, Bhavin Choksi, Romain Bielawski, and Rufin VanRullen. 2021. Does language help generalization in vision models? In Proceedings of the 25th Conference on Computational Natural Language Learning, pages 171–182, Online. Association for Computational Linguistics.