@inproceedings{jensen-etal-2026-stochastic,
title = "Stochastic Parrots or True Virtuosos? Digging Deeper Into the Audio-Video Understanding of {AVQA} Models",
author = "Jensen, Sara Pernille and
Innset Hurum, Hallvard and
Christodoulou, Anna-Maria",
editor = "Epure, Elena V. and
Oramas, Sergio and
Doh, SeungHeon and
Ramoneda, Pedro and
Kruspe, Anna and
Sordo, Mohamed",
booktitle = "Proceedings of the 4th Workshop on {NLP} for Music and Audio ({NLP}4{M}us{A} 2026)",
month = mar,
year = "2026",
address = "Rabat, Morocco",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2026.nlp4musa-1.3/",
pages = "13--19",
ISBN = "979-8-89176-369-2",
abstract = "Audio-video question answering (AVQA) systems for music show signs of multimodal ``understanding'', but it is unclear which inputs they rely on or whether their behavior reflects genuine audio-video reasoning. Existing evaluations focus on overall accuracy and rarely examine modality dependence. We address this gap by suggesting a method of using counterfactual evaluations to analyse the audio-video understanding of the models, illustrated with a case study on the audio-video spatial-temporal (AVST) architecture. This includes interventions that zero out or swap audio, video, or both, where results are benchmarked against a baseline based on linguistic patterns alone. Results show stronger reliance on audio than video, yet performance persists when either modality is removed, indicating learned cross-modal representations. The AVQA system studied thus exhibits non-trivial multimodal integration, though its ``understanding'' remains uneven."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="jensen-etal-2026-stochastic">
<titleInfo>
<title>Stochastic Parrots or True Virtuosos? Digging Deeper Into the Audio-Video Understanding of AVQA Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sara</namePart>
<namePart type="given">Pernille</namePart>
<namePart type="family">Jensen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hallvard</namePart>
<namePart type="family">Innset Hurum</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anna-Maria</namePart>
<namePart type="family">Christodoulou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2026-03</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 4th Workshop on NLP for Music and Audio (NLP4MusA 2026)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Elena</namePart>
<namePart type="given">V</namePart>
<namePart type="family">Epure</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sergio</namePart>
<namePart type="family">Oramas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">SeungHeon</namePart>
<namePart type="family">Doh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pedro</namePart>
<namePart type="family">Ramoneda</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="family">Kruspe</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohamed</namePart>
<namePart type="family">Sordo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Rabat, Morocco</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-369-2</identifier>
</relatedItem>
<abstract>Audio-video question answering (AVQA) systems for music show signs of multimodal “understanding”, but it is unclear which inputs they rely on or whether their behavior reflects genuine audio-video reasoning. Existing evaluations focus on overall accuracy and rarely examine modality dependence. We address this gap by suggesting a method of using counterfactual evaluations to analyse the audio-video understanding of the models, illustrated with a case study on the audio-video spatial-temporal (AVST) architecture. This includes interventions that zero out or swap audio, video, or both, where results are benchmarked against a baseline based on linguistic patterns alone. Results show stronger reliance on audio than video, yet performance persists when either modality is removed, indicating learned cross-modal representations. The AVQA system studied thus exhibits non-trivial multimodal integration, though its “understanding” remains uneven.</abstract>
<identifier type="citekey">jensen-etal-2026-stochastic</identifier>
<location>
<url>https://aclanthology.org/2026.nlp4musa-1.3/</url>
</location>
<part>
<date>2026-03</date>
<extent unit="page">
<start>13</start>
<end>19</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Stochastic Parrots or True Virtuosos? Digging Deeper Into the Audio-Video Understanding of AVQA Models
%A Jensen, Sara Pernille
%A Innset Hurum, Hallvard
%A Christodoulou, Anna-Maria
%Y Epure, Elena V.
%Y Oramas, Sergio
%Y Doh, SeungHeon
%Y Ramoneda, Pedro
%Y Kruspe, Anna
%Y Sordo, Mohamed
%S Proceedings of the 4th Workshop on NLP for Music and Audio (NLP4MusA 2026)
%D 2026
%8 March
%I Association for Computational Linguistics
%C Rabat, Morocco
%@ 979-8-89176-369-2
%F jensen-etal-2026-stochastic
%X Audio-video question answering (AVQA) systems for music show signs of multimodal “understanding”, but it is unclear which inputs they rely on or whether their behavior reflects genuine audio-video reasoning. Existing evaluations focus on overall accuracy and rarely examine modality dependence. We address this gap by proposing a counterfactual-evaluation method for analysing the audio-video understanding of such models, illustrated with a case study on the audio-video spatial-temporal (AVST) architecture. The method applies interventions that zero out or swap audio, video, or both, benchmarking the results against a baseline based on linguistic patterns alone. Results show stronger reliance on audio than video, yet performance persists when either modality is removed, indicating learned cross-modal representations. The AVQA system studied thus exhibits non-trivial multimodal integration, though its “understanding” remains uneven.
%U https://aclanthology.org/2026.nlp4musa-1.3/
%P 13-19
Markdown (Informal)
[Stochastic Parrots or True Virtuosos? Digging Deeper Into the Audio-Video Understanding of AVQA Models](https://aclanthology.org/2026.nlp4musa-1.3/) (Jensen et al., NLP4MusA 2026)
ACL
Sara Pernille Jensen, Hallvard Innset Hurum, and Anna-Maria Christodoulou. 2026. Stochastic Parrots or True Virtuosos? Digging Deeper Into the Audio-Video Understanding of AVQA Models. In Proceedings of the 4th Workshop on NLP for Music and Audio (NLP4MusA 2026), pages 13–19, Rabat, Morocco. Association for Computational Linguistics.
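
For readers curious about the style of analysis the abstract describes, below is a minimal sketch of the counterfactual interventions (zeroing out or swapping audio, video, or both). It is not the authors' code: the feature shapes, tensor names, condition names, and the toy demo are assumptions based only on the abstract.

# Hedged sketch: counterfactual interventions on AVQA modality inputs.
# Assumes precomputed per-clip audio/video feature tensors; all names and
# shapes here are illustrative, not taken from the paper.
import torch

def counterfactual_inputs(audio, video, mode):
    """Return (audio, video) after one intervention.

    mode: "none", "zero_audio", "zero_video", "zero_both",
          "swap_audio", or "swap_video".
    """
    a, v = audio, video
    if mode in ("zero_audio", "zero_both"):
        a = torch.zeros_like(a)            # silence the audio stream
    if mode in ("zero_video", "zero_both"):
        v = torch.zeros_like(v)            # blank the video stream
    if mode == "swap_audio":
        a = a[torch.randperm(a.size(0))]   # audio from mismatched clips
    if mode == "swap_video":
        v = v[torch.randperm(v.size(0))]   # video from mismatched clips
    return a, v

# Toy demo with random "features". A real study would instead run the
# trained model (e.g. AVST) on each condition and compare accuracy
# against a language-only baseline.
audio = torch.randn(8, 128)   # batch of 8 clips, 128-dim audio features
video = torch.randn(8, 512)   # batch of 8 clips, 512-dim video features
for mode in ["none", "zero_audio", "zero_video", "zero_both",
             "swap_audio", "swap_video"]:
    a, v = counterfactual_inputs(audio, video, mode)
    print(mode, a.abs().sum().item(), v.abs().sum().item())

Note that the swap conditions preserve each modality's marginal statistics while breaking its alignment with the question and the other modality, whereas the zero conditions remove the signal entirely; comparing the two separates "uses this modality at all" from "uses this modality's pairing with the rest of the input".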