BibTeX
@inproceedings{zhao-etal-2025-multimodal-foundation,
title = "Can Multimodal Foundation Models Understand Schematic Diagrams? An Empirical Study on Information-Seeking {QA} over Scientific Papers",
author = "Zhao, Yilun and
Wang, Chengye and
Li, Chuhan and
Cohan, Arman",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-acl.957/",
doi = "10.18653/v1/2025.findings-acl.957",
pages = "18598--18631",
ISBN = "979-8-89176-256-5",
abstract = "This paper introduces MISS-QA, the first benchmark specifically designed to evaluate the ability of models to interpret schematic diagrams within scientific literature. MISS-QA comprises 3,000 expert-annotated examples over 983 scientific papers. In this benchmark, models are tasked with interpreting schematic diagrams that illustrate research overviews and answering corresponding information-seeking questions based on the broader context of the paper. To ensure reliable and consistent evaluation, we propose an automated evaluating protocol powered by open-source LLMs trained on human-scored data. We assess the performance of 18 frontier multimodal foundation models, including o1, Claude-3.5, Llama-3.2-Vision, and Qwen2-VL. We reveal a significant performance gap between these models and human experts on MISS-QA. Our analysis of model performance on unanswerable questions and our detailed error analysis further highlight the strengths and limitations of current models, offering key insights to enhance models in comprehending multimodal scientific literature."
}

MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhao-etal-2025-multimodal-foundation">
<titleInfo>
<title>Can Multimodal Foundation Models Understand Schematic Diagrams? An Empirical Study on Information-Seeking QA over Scientific Papers</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yilun</namePart>
<namePart type="family">Zhao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chengye</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chuhan</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Arman</namePart>
<namePart type="family">Cohan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: ACL 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Wanxiang</namePart>
<namePart type="family">Che</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Joyce</namePart>
<namePart type="family">Nabende</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ekaterina</namePart>
<namePart type="family">Shutova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohammad</namePart>
<namePart type="given">Taher</namePart>
<namePart type="family">Pilehvar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-256-5</identifier>
</relatedItem>
<abstract>This paper introduces MISS-QA, the first benchmark specifically designed to evaluate the ability of models to interpret schematic diagrams within scientific literature. MISS-QA comprises 3,000 expert-annotated examples over 983 scientific papers. In this benchmark, models are tasked with interpreting schematic diagrams that illustrate research overviews and answering corresponding information-seeking questions based on the broader context of the paper. To ensure reliable and consistent evaluation, we propose an automated evaluation protocol powered by open-source LLMs trained on human-scored data. We assess the performance of 18 frontier multimodal foundation models, including o1, Claude-3.5, Llama-3.2-Vision, and Qwen2-VL. We reveal a significant performance gap between these models and human experts on MISS-QA. Our analysis of model performance on unanswerable questions and our detailed error analysis further highlight the strengths and limitations of current models, offering key insights for enhancing models' comprehension of multimodal scientific literature.</abstract>
<identifier type="citekey">zhao-etal-2025-multimodal-foundation</identifier>
<identifier type="doi">10.18653/v1/2025.findings-acl.957</identifier>
<location>
<url>https://aclanthology.org/2025.findings-acl.957/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>18598</start>
<end>18631</end>
</extent>
</part>
</mods>
</modsCollection>

Endnote
%0 Conference Proceedings
%T Can Multimodal Foundation Models Understand Schematic Diagrams? An Empirical Study on Information-Seeking QA over Scientific Papers
%A Zhao, Yilun
%A Wang, Chengye
%A Li, Chuhan
%A Cohan, Arman
%Y Che, Wanxiang
%Y Nabende, Joyce
%Y Shutova, Ekaterina
%Y Pilehvar, Mohammad Taher
%S Findings of the Association for Computational Linguistics: ACL 2025
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-256-5
%F zhao-etal-2025-multimodal-foundation
%X This paper introduces MISS-QA, the first benchmark specifically designed to evaluate the ability of models to interpret schematic diagrams within scientific literature. MISS-QA comprises 3,000 expert-annotated examples over 983 scientific papers. In this benchmark, models are tasked with interpreting schematic diagrams that illustrate research overviews and answering corresponding information-seeking questions based on the broader context of the paper. To ensure reliable and consistent evaluation, we propose an automated evaluation protocol powered by open-source LLMs trained on human-scored data. We assess the performance of 18 frontier multimodal foundation models, including o1, Claude-3.5, Llama-3.2-Vision, and Qwen2-VL. We reveal a significant performance gap between these models and human experts on MISS-QA. Our analysis of model performance on unanswerable questions and our detailed error analysis further highlight the strengths and limitations of current models, offering key insights for enhancing models' comprehension of multimodal scientific literature.
%R 10.18653/v1/2025.findings-acl.957
%U https://aclanthology.org/2025.findings-acl.957/
%U https://doi.org/10.18653/v1/2025.findings-acl.957
%P 18598-18631

Markdown (Informal)
[Can Multimodal Foundation Models Understand Schematic Diagrams? An Empirical Study on Information-Seeking QA over Scientific Papers](https://aclanthology.org/2025.findings-acl.957/) (Zhao et al., Findings 2025)

ACL
Yilun Zhao, Chengye Wang, Chuhan Li, and Arman Cohan. 2025. Can Multimodal Foundation Models Understand Schematic Diagrams? An Empirical Study on Information-Seeking QA over Scientific Papers. In Findings of the Association for Computational Linguistics: ACL 2025, pages 18598–18631, Vienna, Austria. Association for Computational Linguistics.