@article{hendricks-etal-2021-decoupling,
    title = "Decoupling the Role of Data, Attention, and Losses in Multimodal Transformers",
    author = "Hendricks, Lisa Anne and
      Mellor, John and
      Schneider, Rosalia and
      Alayrac, Jean-Baptiste and
      Nematzadeh, Aida",
    editor = "Roark, Brian and
      Nenkova, Ani",
    journal = "Transactions of the Association for Computational Linguistics",
    volume = "9",
    year = "2021",
    address = "Cambridge, MA",
    publisher = "MIT Press",
    url = "https://aclanthology.org/2021.tacl-1.35/",
    doi = "10.1162/tacl_a_00385",
    pages = "570--585",
    abstract = "Recently, multimodal transformer models have gained popularity because their performance on downstream tasks suggests they learn rich visual-linguistic representations. Focusing on zero-shot image retrieval tasks, we study three important factors that can impact the quality of learned representations: pretraining data, the attention mechanism, and loss functions. By pretraining models on six datasets, we observe that dataset noise and language similarity to our downstream task are important indicators of model performance. Through architectural analysis, we learn that models with a multimodal attention mechanism can outperform deeper models with modality-specific attention mechanisms. Finally, we show that successful contrastive losses used in the self-supervised learning literature do not yield similar performance gains when used in multimodal transformers."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="hendricks-etal-2021-decoupling">
    <titleInfo>
      <title>Decoupling the Role of Data, Attention, and Losses in Multimodal Transformers</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Lisa</namePart>
      <namePart type="given">Anne</namePart>
      <namePart type="family">Hendricks</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">John</namePart>
      <namePart type="family">Mellor</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Rosalia</namePart>
      <namePart type="family">Schneider</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jean-Baptiste</namePart>
      <namePart type="family">Alayrac</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Aida</namePart>
      <namePart type="family">Nematzadeh</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <genre authority="bibutilsgt">journal article</genre>
    <relatedItem type="host">
      <titleInfo>
        <title>Transactions of the Association for Computational Linguistics</title>
      </titleInfo>
      <originInfo>
        <issuance>continuing</issuance>
        <publisher>MIT Press</publisher>
        <place>
          <placeTerm type="text">Cambridge, MA</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">periodical</genre>
      <genre authority="bibutilsgt">academic journal</genre>
    </relatedItem>
    <abstract>Recently, multimodal transformer models have gained popularity because their performance on downstream tasks suggests they learn rich visual-linguistic representations. Focusing on zero-shot image retrieval tasks, we study three important factors that can impact the quality of learned representations: pretraining data, the attention mechanism, and loss functions. By pretraining models on six datasets, we observe that dataset noise and language similarity to our downstream task are important indicators of model performance. Through architectural analysis, we learn that models with a multimodal attention mechanism can outperform deeper models with modality-specific attention mechanisms. Finally, we show that successful contrastive losses used in the self-supervised learning literature do not yield similar performance gains when used in multimodal transformers.</abstract>
    <identifier type="citekey">hendricks-etal-2021-decoupling</identifier>
    <identifier type="doi">10.1162/tacl_a_00385</identifier>
    <location>
      <url>https://aclanthology.org/2021.tacl-1.35/</url>
    </location>
    <part>
      <date>2021</date>
      <detail type="volume"><number>9</number></detail>
      <extent unit="page">
        <start>570</start>
        <end>585</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Journal Article
%T Decoupling the Role of Data, Attention, and Losses in Multimodal Transformers
%A Hendricks, Lisa Anne
%A Mellor, John
%A Schneider, Rosalia
%A Alayrac, Jean-Baptiste
%A Nematzadeh, Aida
%J Transactions of the Association for Computational Linguistics
%D 2021
%V 9
%I MIT Press
%C Cambridge, MA
%F hendricks-etal-2021-decoupling
%X Recently, multimodal transformer models have gained popularity because their performance on downstream tasks suggests they learn rich visual-linguistic representations. Focusing on zero-shot image retrieval tasks, we study three important factors that can impact the quality of learned representations: pretraining data, the attention mechanism, and loss functions. By pretraining models on six datasets, we observe that dataset noise and language similarity to our downstream task are important indicators of model performance. Through architectural analysis, we learn that models with a multimodal attention mechanism can outperform deeper models with modality-specific attention mechanisms. Finally, we show that successful contrastive losses used in the self-supervised learning literature do not yield similar performance gains when used in multimodal transformers.
%R 10.1162/tacl_a_00385
%U https://aclanthology.org/2021.tacl-1.35/
%U https://doi.org/10.1162/tacl_a_00385
%P 570-585
Markdown (Informal)
[Decoupling the Role of Data, Attention, and Losses in Multimodal Transformers](https://aclanthology.org/2021.tacl-1.35/) (Hendricks et al., TACL 2021)
ACL
Lisa Anne Hendricks, John Mellor, Rosalia Schneider, Jean-Baptiste Alayrac, and Aida Nematzadeh. 2021. Decoupling the Role of Data, Attention, and Losses in Multimodal Transformers. Transactions of the Association for Computational Linguistics, 9:570–585.