BibTeX

@inproceedings{aggarwal-etal-2023-dublin,
    title = "{DUBLIN}: Visual Document Understanding By Language-Image Network",
    author = "Aggarwal, Kriti and
      Khandelwal, Aditi and
      Tanmay, Kumar and
      Mohammed, Owais Khan and
      Liu, Qiang and
      Choudhury, Monojit and
      Chauhan, Hardik and
      Som, Subhojit and
      Chaudhary, Vishrav and
      Tiwary, Saurabh",
    editor = "Wang, Mingxuan and
      Zitouni, Imed",
    booktitle = "Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Industry Track",
    month = dec,
    year = "2023",
    address = "Singapore",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.emnlp-industry.65",
    doi = "10.18653/v1/2023.emnlp-industry.65",
    pages = "693--706",
    abstract = "In this paper, we present DUBLIN, a pixel-based model for visual document understanding that does not rely on OCR. DUBLIN can process both images and texts in documents just by the pixels and handle diverse document types and tasks. DUBLIN is pretrained on a large corpus of document images with novel tasks that enhance its visual and linguistic abilities. We evaluate DUBLIN on various benchmarks and show that it achieves state-of-the-art performance on extractive tasks such as DocVQA, InfoVQA, AI2D, OCR-VQA, RefExp, and CORD, as well as strong performance on abstraction datasets such as VisualMRC and text captioning. Our model demonstrates the potential of OCR-free document processing and opens new avenues for applications and research.",
}
MODS XML

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="aggarwal-etal-2023-dublin">
    <titleInfo>
      <title>DUBLIN: Visual Document Understanding By Language-Image Network</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Kriti</namePart>
      <namePart type="family">Aggarwal</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Aditi</namePart>
      <namePart type="family">Khandelwal</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Kumar</namePart>
      <namePart type="family">Tanmay</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Owais</namePart>
      <namePart type="given">Khan</namePart>
      <namePart type="family">Mohammed</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Qiang</namePart>
      <namePart type="family">Liu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Monojit</namePart>
      <namePart type="family">Choudhury</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Hardik</namePart>
      <namePart type="family">Chauhan</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Subhojit</namePart>
      <namePart type="family">Som</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Vishrav</namePart>
      <namePart type="family">Chaudhary</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Saurabh</namePart>
      <namePart type="family">Tiwary</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2023-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Industry Track</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Mingxuan</namePart>
        <namePart type="family">Wang</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Imed</namePart>
        <namePart type="family">Zitouni</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Singapore</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>In this paper, we present DUBLIN, a pixel-based model for visual document understanding that does not rely on OCR. DUBLIN can process both images and texts in documents just by the pixels and handle diverse document types and tasks. DUBLIN is pretrained on a large corpus of document images with novel tasks that enhance its visual and linguistic abilities. We evaluate DUBLIN on various benchmarks and show that it achieves state-of-the-art performance on extractive tasks such as DocVQA, InfoVQA, AI2D, OCR-VQA, RefExp, and CORD, as well as strong performance on abstraction datasets such as VisualMRC and text captioning. Our model demonstrates the potential of OCR-free document processing and opens new avenues for applications and research.</abstract>
    <identifier type="citekey">aggarwal-etal-2023-dublin</identifier>
    <identifier type="doi">10.18653/v1/2023.emnlp-industry.65</identifier>
    <location>
      <url>https://aclanthology.org/2023.emnlp-industry.65</url>
    </location>
    <part>
      <date>2023-12</date>
      <extent unit="page">
        <start>693</start>
        <end>706</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote

%0 Conference Proceedings
%T DUBLIN: Visual Document Understanding By Language-Image Network
%A Aggarwal, Kriti
%A Khandelwal, Aditi
%A Tanmay, Kumar
%A Mohammed, Owais Khan
%A Liu, Qiang
%A Choudhury, Monojit
%A Chauhan, Hardik
%A Som, Subhojit
%A Chaudhary, Vishrav
%A Tiwary, Saurabh
%Y Wang, Mingxuan
%Y Zitouni, Imed
%S Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Industry Track
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F aggarwal-etal-2023-dublin
%X In this paper, we present DUBLIN, a pixel-based model for visual document understanding that does not rely on OCR. DUBLIN can process both images and texts in documents just by the pixels and handle diverse document types and tasks. DUBLIN is pretrained on a large corpus of document images with novel tasks that enhance its visual and linguistic abilities. We evaluate DUBLIN on various benchmarks and show that it achieves state-of-the-art performance on extractive tasks such as DocVQA, InfoVQA, AI2D, OCR-VQA, RefExp, and CORD, as well as strong performance on abstraction datasets such as VisualMRC and text captioning. Our model demonstrates the potential of OCR-free document processing and opens new avenues for applications and research.
%R 10.18653/v1/2023.emnlp-industry.65
%U https://aclanthology.org/2023.emnlp-industry.65
%U https://doi.org/10.18653/v1/2023.emnlp-industry.65
%P 693-706
Markdown (Informal)

[DUBLIN: Visual Document Understanding By Language-Image Network](https://aclanthology.org/2023.emnlp-industry.65) (Aggarwal et al., EMNLP 2023)

ACL

Kriti Aggarwal, Aditi Khandelwal, Kumar Tanmay, Owais Khan Mohammed, Qiang Liu, Monojit Choudhury, Hardik Chauhan, Subhojit Som, Vishrav Chaudhary, and Saurabh Tiwary. 2023. DUBLIN: Visual Document Understanding By Language-Image Network. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Industry Track, pages 693–706, Singapore. Association for Computational Linguistics.