@inproceedings{ueda-etal-2026-scan,
title = "{SCAN}: Semantic Document Layout Analysis for Textual and Visual Retrieval-Augmented Generation",
author = "Ueda, Nobuhiro and
Dong, Yuyang and
Boros, Kriszti{\'a}n and
Ito, Daiki and
Sera, Takuya and
Oyamada, Masafumi",
editor = "Demberg, Vera and
Inui, Kentaro and
Marquez, Llu{\'i}s",
booktitle = "Findings of the {Association for Computational Linguistics}: {EACL} 2026",
month = mar,
year = "2026",
address = "Rabat, Morocco",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2026.findings-eacl.82/",
pages = "1618--1637",
isbn = "979-8-89176-386-9",
abstract = "With the increasing adoption of Large Language Models (LLMs) and Vision-Language Models (VLMs), rich document analysis technologies for applications like Retrieval-Augmented Generation (RAG) and visual RAG are gaining significant attention. Recent research indicates that using VLMs yields better RAG performance, but processing rich documents remains a challenge since a single page contains large amounts of information. In this paper, we present SCAN (SemantiC Document Layout ANalysis), a novel approach that enhances both textual and visual Retrieval-Augmented Generation (RAG) systems that work with visually rich documents. It is a VLM-friendly approach that identifies document components with appropriate semantic granularity, balancing context preservation with processing efficiency. SCAN uses a coarse-grained semantic approach that divides documents into coherent regions covering contiguous components. We trained the SCAN model by fine-tuning object detection models on an annotated dataset. Our experimental results across English and Japanese datasets demonstrate that applying SCAN improves end-to-end textual RAG performance by up to 9.4 points and visual RAG performance by up to 10.4 points, outperforming conventional approaches and even commercial document processing solutions."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ueda-etal-2026-scan">
<titleInfo>
<title>SCAN: Semantic Document Layout Analysis for Textual and Visual Retrieval-Augmented Generation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Nobuhiro</namePart>
<namePart type="family">Ueda</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yuyang</namePart>
<namePart type="family">Dong</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Krisztián</namePart>
<namePart type="family">Boros</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Daiki</namePart>
<namePart type="family">Ito</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Takuya</namePart>
<namePart type="family">Sera</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Masafumi</namePart>
<namePart type="family">Oyamada</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2026-03</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EACL 2026</title>
</titleInfo>
<name type="personal">
<namePart type="given">Vera</namePart>
<namePart type="family">Demberg</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kentaro</namePart>
<namePart type="family">Inui</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lluís</namePart>
<namePart type="family">Marquez</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Rabat, Morocco</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-386-9</identifier>
</relatedItem>
<abstract>With the increasing adoption of Large Language Models (LLMs) and Vision-Language Models (VLMs), rich document analysis technologies for applications like Retrieval-Augmented Generation (RAG) and visual RAG are gaining significant attention. Recent research indicates that using VLMs yields better RAG performance, but processing rich documents remains a challenge since a single page contains large amounts of information. In this paper, we present SCAN (SemantiC Document Layout ANalysis), a novel approach that enhances both textual and visual Retrieval-Augmented Generation (RAG) systems that work with visually rich documents. It is a VLM-friendly approach that identifies document components with appropriate semantic granularity, balancing context preservation with processing efficiency. SCAN uses a coarse-grained semantic approach that divides documents into coherent regions covering contiguous components. We trained the SCAN model by fine-tuning object detection models on an annotated dataset. Our experimental results across English and Japanese datasets demonstrate that applying SCAN improves end-to-end textual RAG performance by up to 9.4 points and visual RAG performance by up to 10.4 points, outperforming conventional approaches and even commercial document processing solutions.</abstract>
<identifier type="citekey">ueda-etal-2026-scan</identifier>
<location>
<url>https://aclanthology.org/2026.findings-eacl.82/</url>
</location>
<part>
<date>2026-03</date>
<extent unit="page">
<start>1618</start>
<end>1637</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T SCAN: Semantic Document Layout Analysis for Textual and Visual Retrieval-Augmented Generation
%A Ueda, Nobuhiro
%A Dong, Yuyang
%A Boros, Krisztián
%A Ito, Daiki
%A Sera, Takuya
%A Oyamada, Masafumi
%Y Demberg, Vera
%Y Inui, Kentaro
%Y Marquez, Lluís
%S Findings of the Association for Computational Linguistics: EACL 2026
%D 2026
%8 March
%I Association for Computational Linguistics
%C Rabat, Morocco
%@ 979-8-89176-386-9
%F ueda-etal-2026-scan
%X With the increasing adoption of Large Language Models (LLMs) and Vision-Language Models (VLMs), rich document analysis technologies for applications like Retrieval-Augmented Generation (RAG) and visual RAG are gaining significant attention. Recent research indicates that using VLMs yields better RAG performance, but processing rich documents remains a challenge since a single page contains large amounts of information. In this paper, we present SCAN (SemantiC Document Layout ANalysis), a novel approach that enhances both textual and visual Retrieval-Augmented Generation (RAG) systems that work with visually rich documents. It is a VLM-friendly approach that identifies document components with appropriate semantic granularity, balancing context preservation with processing efficiency. SCAN uses a coarse-grained semantic approach that divides documents into coherent regions covering contiguous components. We trained the SCAN model by fine-tuning object detection models on an annotated dataset. Our experimental results across English and Japanese datasets demonstrate that applying SCAN improves end-to-end textual RAG performance by up to 9.4 points and visual RAG performance by up to 10.4 points, outperforming conventional approaches and even commercial document processing solutions.
%U https://aclanthology.org/2026.findings-eacl.82/
%P 1618-1637
Markdown (Informal)
[SCAN: Semantic Document Layout Analysis for Textual and Visual Retrieval-Augmented Generation](https://aclanthology.org/2026.findings-eacl.82/) (Ueda et al., Findings 2026)
ACL