@inproceedings{milewski-etal-2022-finding,
title = "Finding Structural Knowledge in Multimodal-{BERT}",
author = "Milewski, Victor and
de Lhoneux, Miryam and
Moens, Marie-Francine",
booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.acl-long.388",
doi = "10.18653/v1/2022.acl-long.388",
pages = "5658--5671",
abstract = "In this work, we investigate the knowledge learned in the embeddings of multimodal-BERT models. More specifically, we probe their capabilities of storing the grammatical structure of linguistic data and the structure learned over objects in visual data. To reach that goal, we first make the inherent structure of language and visuals explicit by a dependency parse of the sentences that describe the image and by the dependencies between the object regions in the image, respectively. We call this explicit visual structure the scene tree, that is based on the dependency tree of the language description. Extensive probing experiments show that the multimodal-BERT models do not encode these scene trees.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="milewski-etal-2022-finding">
    <titleInfo>
      <title>Finding Structural Knowledge in Multimodal-BERT</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Victor</namePart>
      <namePart type="family">Milewski</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Miryam</namePart>
      <namePart type="family">de Lhoneux</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Marie-Francine</namePart>
      <namePart type="family">Moens</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2022-05</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Dublin, Ireland</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>In this work, we investigate the knowledge learned in the embeddings of multimodal-BERT models. More specifically, we probe their capabilities of storing the grammatical structure of linguistic data and the structure learned over objects in visual data. To reach that goal, we first make the inherent structure of language and visuals explicit by a dependency parse of the sentences that describe the image and by the dependencies between the object regions in the image, respectively. We call this explicit visual structure the scene tree, which is based on the dependency tree of the language description. Extensive probing experiments show that the multimodal-BERT models do not encode these scene trees.</abstract>
<identifier type="citekey">milewski-etal-2022-finding</identifier>
<identifier type="doi">10.18653/v1/2022.acl-long.388</identifier>
<location>
<url>https://aclanthology.org/2022.acl-long.388</url>
</location>
<part>
<date>2022-05</date>
<extent unit="page">
<start>5658</start>
<end>5671</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Finding Structural Knowledge in Multimodal-BERT
%A Milewski, Victor
%A de Lhoneux, Miryam
%A Moens, Marie-Francine
%S Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)
%D 2022
%8 May
%I Association for Computational Linguistics
%C Dublin, Ireland
%F milewski-etal-2022-finding
%X In this work, we investigate the knowledge learned in the embeddings of multimodal-BERT models. More specifically, we probe their capabilities of storing the grammatical structure of linguistic data and the structure learned over objects in visual data. To reach that goal, we first make the inherent structure of language and visuals explicit by a dependency parse of the sentences that describe the image and by the dependencies between the object regions in the image, respectively. We call this explicit visual structure the scene tree, which is based on the dependency tree of the language description. Extensive probing experiments show that the multimodal-BERT models do not encode these scene trees.
%R 10.18653/v1/2022.acl-long.388
%U https://aclanthology.org/2022.acl-long.388
%U https://doi.org/10.18653/v1/2022.acl-long.388
%P 5658-5671
Markdown (Informal)
[Finding Structural Knowledge in Multimodal-BERT](https://aclanthology.org/2022.acl-long.388) (Milewski et al., ACL 2022)
ACL
Victor Milewski, Miryam de Lhoneux, and Marie-Francine Moens. 2022. Finding Structural Knowledge in Multimodal-BERT. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 5658–5671, Dublin, Ireland. Association for Computational Linguistics.