@inproceedings{yang-etal-2024-retrieval,
title = "Retrieval Evaluation for Long-Form and Knowledge-Intensive Image{--}Text Article Composition",
author = "Yang, Jheng-Hong and
Lassance, Carlos and
Rezende, Rafael S. and
Srinivasan, Krishna and
Clinchant, St{\'e}phane and
Lin, Jimmy",
editor = "Lucie-Aim{\'e}e, Lucie and
Fan, Angela and
Gwadabe, Tajuddeen and
Johnson, Isaac and
Petroni, Fabio and
van Strien, Daniel",
booktitle = "Proceedings of the First Workshop on Advancing Natural Language Processing for Wikipedia",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.wikinlp-1.9",
pages = "36--45",
abstract = "This paper examines the integration of images into Wikipedia articles by evaluating image{--}text retrieval tasks in multimedia content creation, focusing on developing retrieval-augmented tools to enhance the creation of high-quality multimedia articles. Despite ongoing research, the interplay between text and visuals, such as photos and diagrams, remains underexplored, limiting support for real-world applications. We introduce AToMiC, a dataset for long-form, knowledge-intensive image{--}text retrieval, detailing its task design, evaluation protocols, and relevance criteria.Our findings show that a hybrid approach combining a sparse retriever with a dense retriever achieves satisfactory effectiveness, with nDCG@10 scores around 0.4 for Image Suggestion and Image Promotion tasks, providing insights into the challenges of retrieval evaluation in an image{--}text interleaved article composition context.The AToMiC dataset is available at https://github.com/TREC-AToMiC/AToMiC.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="yang-etal-2024-retrieval">
<titleInfo>
<title>Retrieval Evaluation for Long-Form and Knowledge-Intensive Image–Text Article Composition</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jheng-Hong</namePart>
<namePart type="family">Yang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carlos</namePart>
<namePart type="family">Lassance</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rafael</namePart>
<namePart type="given">S</namePart>
<namePart type="family">Rezende</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Krishna</namePart>
<namePart type="family">Srinivasan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Stéphane</namePart>
<namePart type="family">Clinchant</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jimmy</namePart>
<namePart type="family">Lin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First Workshop on Advancing Natural Language Processing for Wikipedia</title>
</titleInfo>
<name type="personal">
<namePart type="given">Lucie</namePart>
<namePart type="family">Lucie-Aimée</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Angela</namePart>
<namePart type="family">Fan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tajuddeen</namePart>
<namePart type="family">Gwadabe</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Isaac</namePart>
<namePart type="family">Johnson</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Fabio</namePart>
<namePart type="family">Petroni</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Daniel</namePart>
<namePart type="family">van Strien</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Miami, Florida, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper examines the integration of images into Wikipedia articles by evaluating image–text retrieval tasks in multimedia content creation, focusing on developing retrieval-augmented tools to enhance the creation of high-quality multimedia articles. Despite ongoing research, the interplay between text and visuals, such as photos and diagrams, remains underexplored, limiting support for real-world applications. We introduce AToMiC, a dataset for long-form, knowledge-intensive image–text retrieval, detailing its task design, evaluation protocols, and relevance criteria. Our findings show that a hybrid approach combining a sparse retriever with a dense retriever achieves satisfactory effectiveness, with nDCG@10 scores around 0.4 for Image Suggestion and Image Promotion tasks, providing insights into the challenges of retrieval evaluation in an image–text interleaved article composition context. The AToMiC dataset is available at https://github.com/TREC-AToMiC/AToMiC.</abstract>
<identifier type="citekey">yang-etal-2024-retrieval</identifier>
<location>
<url>https://aclanthology.org/2024.wikinlp-1.9</url>
</location>
<part>
<date>2024-11</date>
<extent unit="page">
<start>36</start>
<end>45</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Retrieval Evaluation for Long-Form and Knowledge-Intensive Image–Text Article Composition
%A Yang, Jheng-Hong
%A Lassance, Carlos
%A Rezende, Rafael S.
%A Srinivasan, Krishna
%A Clinchant, Stéphane
%A Lin, Jimmy
%Y Lucie-Aimée, Lucie
%Y Fan, Angela
%Y Gwadabe, Tajuddeen
%Y Johnson, Isaac
%Y Petroni, Fabio
%Y van Strien, Daniel
%S Proceedings of the First Workshop on Advancing Natural Language Processing for Wikipedia
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, Florida, USA
%F yang-etal-2024-retrieval
%X This paper examines the integration of images into Wikipedia articles by evaluating image–text retrieval tasks in multimedia content creation, focusing on developing retrieval-augmented tools to enhance the creation of high-quality multimedia articles. Despite ongoing research, the interplay between text and visuals, such as photos and diagrams, remains underexplored, limiting support for real-world applications. We introduce AToMiC, a dataset for long-form, knowledge-intensive image–text retrieval, detailing its task design, evaluation protocols, and relevance criteria. Our findings show that a hybrid approach combining a sparse retriever with a dense retriever achieves satisfactory effectiveness, with nDCG@10 scores around 0.4 for Image Suggestion and Image Promotion tasks, providing insights into the challenges of retrieval evaluation in an image–text interleaved article composition context. The AToMiC dataset is available at https://github.com/TREC-AToMiC/AToMiC.
%U https://aclanthology.org/2024.wikinlp-1.9
%P 36-45
Markdown (Informal)
[Retrieval Evaluation for Long-Form and Knowledge-Intensive Image–Text Article Composition](https://aclanthology.org/2024.wikinlp-1.9) (Yang et al., WikiNLP 2024)
ACL
Jheng-Hong Yang, Carlos Lassance, Rafael S. Rezende, Krishna Srinivasan, Stéphane Clinchant, and Jimmy Lin. 2024. Retrieval Evaluation for Long-Form and Knowledge-Intensive Image–Text Article Composition. In Proceedings of the First Workshop on Advancing Natural Language Processing for Wikipedia, pages 36–45, Miami, Florida, USA. Association for Computational Linguistics.
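The abstract above reports that a hybrid of a sparse and a dense retriever reaches nDCG@10 around 0.4 on the AToMiC tasks. As a minimal sketch of what such an evaluation involves, the following Python snippet computes nDCG@10 over a ranking produced by a convex combination of min-max-normalized sparse and dense scores. The fusion weight `alpha`, the toy scores, and the qrels are illustrative assumptions for exposition; they are not the paper's actual fusion method, pipeline, or data.

```python
import math

def ndcg_at_k(ranking, qrels, k=10):
    """nDCG@k with a log2 rank discount; qrels maps doc id -> graded relevance."""
    dcg = sum(qrels.get(doc, 0) / math.log2(rank + 2)
              for rank, doc in enumerate(ranking[:k]))
    ideal = sorted(qrels.values(), reverse=True)[:k]
    idcg = sum(rel / math.log2(rank + 2) for rank, rel in enumerate(ideal))
    return dcg / idcg if idcg > 0 else 0.0

def min_max(scores):
    """Min-max normalize a doc -> score dict into [0, 1]."""
    lo, hi = min(scores.values()), max(scores.values())
    span = (hi - lo) or 1.0
    return {doc: (s - lo) / span for doc, s in scores.items()}

def hybrid_fuse(sparse, dense, alpha=0.5):
    """Rank by a convex combination of normalized sparse and dense scores.

    alpha is an assumed fusion weight; docs missing from one retriever's
    list get a normalized score of 0 from that retriever.
    """
    s, d = min_max(sparse), min_max(dense)
    fused = {doc: alpha * d.get(doc, 0.0) + (1 - alpha) * s.get(doc, 0.0)
             for doc in set(s) | set(d)}
    return sorted(fused, key=fused.get, reverse=True)

# Toy example: the two retrievers disagree; fusion surfaces img2,
# the most relevant image under the (hypothetical) judgments.
sparse_scores = {"img1": 12.3, "img2": 10.1, "img3": 2.0}
dense_scores = {"img2": 0.92, "img4": 0.88, "img1": 0.35}
qrels = {"img2": 2, "img4": 1}  # graded relevance judgments
ranking = hybrid_fuse(sparse_scores, dense_scores)
print(ranking, round(ndcg_at_k(ranking, qrels), 3))
```

In this toy run the fused ranking places img2 first even though the sparse retriever alone preferred img1, which is the basic motivation for combining the two signals.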