BibTeX
@inproceedings{hagstrom-etal-2022-use,
title = "Can We Use Small Models to Investigate Multimodal Fusion Methods?",
author = {Hagstr{\"o}m, Lovisa and
Norlund, Tobias and
Johansson, Richard},
editor = "Dobnik, Simon and
Grove, Julian and
Sayeed, Asad",
booktitle = "Proceedings of the 2022 CLASP Conference on (Dis)embodiment",
month = sep,
year = "2022",
address = "Gothenburg, Sweden",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.clasp-1.5/",
pages = "45--50",
abstract = "Many successful methods for fusing language with information from the visual modality have recently been proposed and the topic of multimodal training is ever evolving. However, it is still largely not known what makes different vision-and-language models successful. Investigations into this are made difficult by the large sizes of the models used, requiring large training datasets and causing long train and compute times. Therefore, we propose the idea of studying multimodal fusion methods in a smaller setting with small models and datasets. In this setting, we can experiment with different approaches for fusing multimodal information with language in a controlled fashion, while allowing for fast experimentation. We illustrate this idea with the math arithmetics sandbox. This is a setting in which we fuse language with information from the math modality and strive to replicate some fusion methods from the vision-and-language domain. We find that some results for fusion methods from the larger domain translate to the math arithmetics sandbox, indicating a promising future avenue for multimodal model prototyping."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="hagstrom-etal-2022-use">
<titleInfo>
<title>Can We Use Small Models to Investigate Multimodal Fusion Methods?</title>
</titleInfo>
<name type="personal">
<namePart type="given">Lovisa</namePart>
<namePart type="family">Hagström</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tobias</namePart>
<namePart type="family">Norlund</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Richard</namePart>
<namePart type="family">Johansson</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2022 CLASP Conference on (Dis)embodiment</title>
</titleInfo>
<name type="personal">
<namePart type="given">Simon</namePart>
<namePart type="family">Dobnik</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Julian</namePart>
<namePart type="family">Grove</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Asad</namePart>
<namePart type="family">Sayeed</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Gothenburg, Sweden</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
    <abstract>Many successful methods for fusing language with information from the visual modality have recently been proposed, and the topic of multimodal training is ever-evolving. However, it is still largely unknown what makes different vision-and-language models successful. Investigations into this are made difficult by the large sizes of the models used, which require large training datasets and cause long training and compute times. Therefore, we propose the idea of studying multimodal fusion methods in a smaller setting with small models and datasets. In this setting, we can experiment with different approaches for fusing multimodal information with language in a controlled fashion, while allowing for fast experimentation. We illustrate this idea with the math arithmetics sandbox: a setting in which we fuse language with information from the math modality and strive to replicate some fusion methods from the vision-and-language domain. We find that some results for fusion methods from the larger domain translate to the math arithmetics sandbox, indicating a promising future avenue for multimodal model prototyping.</abstract>
<identifier type="citekey">hagstrom-etal-2022-use</identifier>
<location>
<url>https://aclanthology.org/2022.clasp-1.5/</url>
</location>
<part>
<date>2022-09</date>
<extent unit="page">
<start>45</start>
<end>50</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Can We Use Small Models to Investigate Multimodal Fusion Methods?
%A Hagström, Lovisa
%A Norlund, Tobias
%A Johansson, Richard
%Y Dobnik, Simon
%Y Grove, Julian
%Y Sayeed, Asad
%S Proceedings of the 2022 CLASP Conference on (Dis)embodiment
%D 2022
%8 September
%I Association for Computational Linguistics
%C Gothenburg, Sweden
%F hagstrom-etal-2022-use
%X Many successful methods for fusing language with information from the visual modality have recently been proposed, and the topic of multimodal training is ever-evolving. However, it is still largely unknown what makes different vision-and-language models successful. Investigations into this are made difficult by the large sizes of the models used, which require large training datasets and cause long training and compute times. Therefore, we propose the idea of studying multimodal fusion methods in a smaller setting with small models and datasets. In this setting, we can experiment with different approaches for fusing multimodal information with language in a controlled fashion, while allowing for fast experimentation. We illustrate this idea with the math arithmetics sandbox: a setting in which we fuse language with information from the math modality and strive to replicate some fusion methods from the vision-and-language domain. We find that some results for fusion methods from the larger domain translate to the math arithmetics sandbox, indicating a promising future avenue for multimodal model prototyping.
%U https://aclanthology.org/2022.clasp-1.5/
%P 45-50
Markdown (Informal)
[Can We Use Small Models to Investigate Multimodal Fusion Methods?](https://aclanthology.org/2022.clasp-1.5/) (Hagström et al., CLASP 2022)
ACL
Lovisa Hagström, Tobias Norlund, and Richard Johansson. 2022. [Can We Use Small Models to Investigate Multimodal Fusion Methods?](https://aclanthology.org/2022.clasp-1.5/). In *Proceedings of the 2022 CLASP Conference on (Dis)embodiment*, pages 45–50, Gothenburg, Sweden. Association for Computational Linguistics.