@inproceedings{tang-etal-2025-sparkle,
title = "Sparkle: Mastering Basic Spatial Capabilities in Vision Language Models Elicits Generalization to Spatial Reasoning",
author = "Tang, Yihong and
Qu, Ao and
Wang, Zhaokai and
Zhuang, Dingyi and
Wu, Zhaofeng and
Ma, Wei and
Wang, Shenhao and
Zheng, Yunhan and
Zhao, Zhan and
Zhao, Jinhua",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-emnlp.217/",
pages = "4083--4103",
ISBN = "979-8-89176-335-7",
abstract = "Vision-language models (VLMs) excel in many downstream tasks but struggle with spatial reasoning, which is crucial for navigation and interaction with physical environments. Specifically, many spatial reasoning tasks rely on fundamental two-dimensional (2D) capabilities, yet our evaluation shows that state-of-the-art VLMs often produce implausible or incorrect solutions for composite spatial problems, including simple pathfinding tasks that humans solve effortlessly at a glance. To address this, we explore an effective approach to enhance 2D spatial reasoning in VLMs by training them solely on basic spatial capabilities. We first disentangle 2D spatial reasoning into three core components: direction comprehension, distance estimation, and localization. Our central hypothesis is that mastering these basic capabilities will significantly boost performance on more complex spatial tasks requiring advanced reasoning and combinatorial problem-solving, as well as generalize to real-world visual-spatial scenarios. To test this hypothesis, we introduce Sparkle, a framework that generates synthetic data to provide targeted supervision for VLMs across these three basic spatial capabilities, producing an instruction dataset for each capability. Our experiments demonstrate that VLMs fine-tuned with Sparkle achieve substantial improvements, not only on basic tasks but also in generalizing to composite and out-of-distribution real-world spatial reasoning tasks. These findings highlight that enhancing basic spatial capabilities through synthetic generalization effectively improves complex spatial reasoning, offering insights into systematic strategies for boosting VLMs' spatial understanding. Source codes of Sparkle are available at https://github.com/YihongT/Sparkle."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="tang-etal-2025-sparkle">
<titleInfo>
<title>Sparkle: Mastering Basic Spatial Capabilities in Vision Language Models Elicits Generalization to Spatial Reasoning</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yihong</namePart>
<namePart type="family">Tang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ao</namePart>
<namePart type="family">Qu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhaokai</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dingyi</namePart>
<namePart type="family">Zhuang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhaofeng</namePart>
<namePart type="family">Wu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wei</namePart>
<namePart type="family">Ma</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shenhao</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yunhan</namePart>
<namePart type="family">Zheng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhan</namePart>
<namePart type="family">Zhao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jinhua</namePart>
<namePart type="family">Zhao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Christos</namePart>
<namePart type="family">Christodoulopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tanmoy</namePart>
<namePart type="family">Chakraborty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolyn</namePart>
<namePart type="family">Rose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Violet</namePart>
<namePart type="family">Peng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-335-7</identifier>
</relatedItem>
<abstract>Vision-language models (VLMs) excel in many downstream tasks but struggle with spatial reasoning, which is crucial for navigation and interaction with physical environments. Specifically, many spatial reasoning tasks rely on fundamental two-dimensional (2D) capabilities, yet our evaluation shows that state-of-the-art VLMs often produce implausible or incorrect solutions for composite spatial problems, including simple pathfinding tasks that humans solve effortlessly at a glance. To address this, we explore an effective approach to enhance 2D spatial reasoning in VLMs by training them solely on basic spatial capabilities. We first disentangle 2D spatial reasoning into three core components: direction comprehension, distance estimation, and localization. Our central hypothesis is that mastering these basic capabilities will significantly boost performance on more complex spatial tasks requiring advanced reasoning and combinatorial problem-solving, as well as generalize to real-world visual-spatial scenarios. To test this hypothesis, we introduce Sparkle, a framework that generates synthetic data to provide targeted supervision for VLMs across these three basic spatial capabilities, producing an instruction dataset for each capability. Our experiments demonstrate that VLMs fine-tuned with Sparkle achieve substantial improvements, not only on basic tasks but also in generalizing to composite and out-of-distribution real-world spatial reasoning tasks. These findings highlight that enhancing basic spatial capabilities through synthetic generalization effectively improves complex spatial reasoning, offering insights into systematic strategies for boosting VLMs’ spatial understanding. Source codes of Sparkle are available at https://github.com/YihongT/Sparkle.</abstract>
<identifier type="citekey">tang-etal-2025-sparkle</identifier>
<location>
<url>https://aclanthology.org/2025.findings-emnlp.217/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>4083</start>
<end>4103</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Sparkle: Mastering Basic Spatial Capabilities in Vision Language Models Elicits Generalization to Spatial Reasoning
%A Tang, Yihong
%A Qu, Ao
%A Wang, Zhaokai
%A Zhuang, Dingyi
%A Wu, Zhaofeng
%A Ma, Wei
%A Wang, Shenhao
%A Zheng, Yunhan
%A Zhao, Zhan
%A Zhao, Jinhua
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Findings of the Association for Computational Linguistics: EMNLP 2025
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-335-7
%F tang-etal-2025-sparkle
%X Vision-language models (VLMs) excel in many downstream tasks but struggle with spatial reasoning, which is crucial for navigation and interaction with physical environments. Specifically, many spatial reasoning tasks rely on fundamental two-dimensional (2D) capabilities, yet our evaluation shows that state-of-the-art VLMs often produce implausible or incorrect solutions for composite spatial problems, including simple pathfinding tasks that humans solve effortlessly at a glance. To address this, we explore an effective approach to enhance 2D spatial reasoning in VLMs by training them solely on basic spatial capabilities. We first disentangle 2D spatial reasoning into three core components: direction comprehension, distance estimation, and localization. Our central hypothesis is that mastering these basic capabilities will significantly boost performance on more complex spatial tasks requiring advanced reasoning and combinatorial problem-solving, as well as generalize to real-world visual-spatial scenarios. To test this hypothesis, we introduce Sparkle, a framework that generates synthetic data to provide targeted supervision for VLMs across these three basic spatial capabilities, producing an instruction dataset for each capability. Our experiments demonstrate that VLMs fine-tuned with Sparkle achieve substantial improvements, not only on basic tasks but also in generalizing to composite and out-of-distribution real-world spatial reasoning tasks. These findings highlight that enhancing basic spatial capabilities through synthetic generalization effectively improves complex spatial reasoning, offering insights into systematic strategies for boosting VLMs’ spatial understanding. Source codes of Sparkle are available at https://github.com/YihongT/Sparkle.
%U https://aclanthology.org/2025.findings-emnlp.217/
%P 4083-4103
Markdown (Informal)
[Sparkle: Mastering Basic Spatial Capabilities in Vision Language Models Elicits Generalization to Spatial Reasoning](https://aclanthology.org/2025.findings-emnlp.217/) (Tang et al., Findings 2025)
ACL
Yihong Tang, Ao Qu, Zhaokai Wang, Dingyi Zhuang, Zhaofeng Wu, Wei Ma, Shenhao Wang, Yunhan Zheng, Zhan Zhao, and Jinhua Zhao. 2025. Sparkle: Mastering Basic Spatial Capabilities in Vision Language Models Elicits Generalization to Spatial Reasoning. In Findings of the Association for Computational Linguistics: EMNLP 2025, pages 4083–4103, Suzhou, China. Association for Computational Linguistics.
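
As a rough illustration of the kind of synthetic supervision the abstract describes (instruction-style data targeting direction comprehension, distance estimation, and localization), the sketch below generates toy grid-world QA pairs. The grid size, scene format, and all function names are assumptions made for illustration only and do not reflect the actual Sparkle pipeline; see the linked repository (https://github.com/YihongT/Sparkle) for the authors' real implementation.

```python
# Hypothetical illustration only: a minimal generator of instruction/answer
# pairs for the three basic 2D capabilities named in the abstract
# (direction comprehension, distance estimation, localization).
# Scene representation, grid size, and naming are assumptions, not Sparkle's.
import math
import random

GRID_SIZE = 8  # assumed toy grid resolution

def sample_scene(rng: random.Random):
    """Place two distinct objects at random integer grid coordinates."""
    a = (rng.randrange(GRID_SIZE), rng.randrange(GRID_SIZE))
    b = (rng.randrange(GRID_SIZE), rng.randrange(GRID_SIZE))
    while b == a:
        b = (rng.randrange(GRID_SIZE), rng.randrange(GRID_SIZE))
    return a, b

def direction_label(a, b):
    """Coarse 8-way compass direction from a to b (y grows downward)."""
    dx, dy = b[0] - a[0], b[1] - a[1]
    ns = "south" if dy > 0 else "north" if dy < 0 else ""
    ew = "east" if dx > 0 else "west" if dx < 0 else ""
    return (ns + ew) or "same cell"

def make_examples(rng: random.Random):
    """One instruction/answer pair per basic capability for a sampled scene."""
    a, b = sample_scene(rng)
    return [
        {"capability": "direction",
         "instruction": f"Object A is at {a} and object B is at {b}. "
                        "In which compass direction is B from A?",
         "answer": direction_label(a, b)},
        {"capability": "distance",
         "instruction": f"How far apart are A at {a} and B at {b} "
                        "in Euclidean distance (grid units)?",
         "answer": f"{math.dist(a, b):.2f}"},
        {"capability": "localization",
         "instruction": "Give the (column, row) coordinates of object A.",
         "answer": str(a)},
    ]

if __name__ == "__main__":
    rng = random.Random(0)
    for ex in make_examples(rng):
        print(ex["capability"], "|", ex["instruction"], "->", ex["answer"])
```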