@inproceedings{tikhonov-etal-2024-individuation,
    title = "Individuation in Neural Models with and without Visual Grounding",
    author = "Tikhonov, Alexey and
      Bylinina, Lisa and
      Yamshchikov, Ivan P.",
    editor = "Peled-Cohen, Lotem and
      Calderon, Nitay and
      Lissak, Shir and
      Reichart, Roi",
    booktitle = "Proceedings of the 1st Workshop on NLP for Science (NLP4Science)",
    month = nov,
    year = "2024",
    address = "Miami, FL, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.nlp4science-1.21",
    pages = "240--248",
    abstract = "We show differences between a language-and-vision model CLIP and two text-only models {---} FastText and SBERT {---} when it comes to the encoding of individuation information. We study latent representations that CLIP provides for substrates, granular aggregates, and various numbers of objects. We demonstrate that CLIP embeddings capture quantitative differences in individuation better than models trained on text-only data. Moreover, the individuation hierarchy we deduce from the CLIP embeddings agrees with the hierarchies proposed in linguistics and cognitive science.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="tikhonov-etal-2024-individuation">
    <titleInfo>
      <title>Individuation in Neural Models with and without Visual Grounding</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Alexey</namePart>
      <namePart type="family">Tikhonov</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Lisa</namePart>
      <namePart type="family">Bylinina</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ivan</namePart>
      <namePart type="given">P</namePart>
      <namePart type="family">Yamshchikov</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 1st Workshop on NLP for Science (NLP4Science)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Lotem</namePart>
        <namePart type="family">Peled-Cohen</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Nitay</namePart>
        <namePart type="family">Calderon</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Shir</namePart>
        <namePart type="family">Lissak</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Roi</namePart>
        <namePart type="family">Reichart</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Miami, FL, USA</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We show differences between a language-and-vision model CLIP and two text-only models — FastText and SBERT — when it comes to the encoding of individuation information. We study latent representations that CLIP provides for substrates, granular aggregates, and various numbers of objects. We demonstrate that CLIP embeddings capture quantitative differences in individuation better than models trained on text-only data. Moreover, the individuation hierarchy we deduce from the CLIP embeddings agrees with the hierarchies proposed in linguistics and cognitive science.</abstract>
    <identifier type="citekey">tikhonov-etal-2024-individuation</identifier>
    <location>
      <url>https://aclanthology.org/2024.nlp4science-1.21</url>
    </location>
    <part>
      <date>2024-11</date>
      <extent unit="page">
        <start>240</start>
        <end>248</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Individuation in Neural Models with and without Visual Grounding
%A Tikhonov, Alexey
%A Bylinina, Lisa
%A Yamshchikov, Ivan P.
%Y Peled-Cohen, Lotem
%Y Calderon, Nitay
%Y Lissak, Shir
%Y Reichart, Roi
%S Proceedings of the 1st Workshop on NLP for Science (NLP4Science)
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, FL, USA
%F tikhonov-etal-2024-individuation
%X We show differences between a language-and-vision model CLIP and two text-only models — FastText and SBERT — when it comes to the encoding of individuation information. We study latent representations that CLIP provides for substrates, granular aggregates, and various numbers of objects. We demonstrate that CLIP embeddings capture quantitative differences in individuation better than models trained on text-only data. Moreover, the individuation hierarchy we deduce from the CLIP embeddings agrees with the hierarchies proposed in linguistics and cognitive science.
%U https://aclanthology.org/2024.nlp4science-1.21
%P 240-248
Markdown (Informal)
[Individuation in Neural Models with and without Visual Grounding](https://aclanthology.org/2024.nlp4science-1.21) (Tikhonov et al., NLP4Science 2024)
ACL
Alexey Tikhonov, Lisa Bylinina, and Ivan P. Yamshchikov. 2024. Individuation in Neural Models with and without Visual Grounding. In Proceedings of the 1st Workshop on NLP for Science (NLP4Science), pages 240–248, Miami, FL, USA. Association for Computational Linguistics.