% ACL Anthology record for LREC-COLING 2024, paper 750 (text-to-image retrieval).
% Field values are unchanged; entry restyled with brace delimiters (safer than
% quotes for values that may contain quote characters) and aligned assignments.
@inproceedings{ge-etal-2024-idc,
  title     = {{IDC}: Boost Text-to-image Retrieval via Indirect and Direct Connections},
  author    = {Ge, Guowei and Hao, Kuangrong and Hao, Lingguang},
  editor    = {Calzolari, Nicoletta and Kan, Min-Yen and Hoste, Veronique and Lenci, Alessandro and Sakti, Sakriani and Xue, Nianwen},
  booktitle = {Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)},
  month     = may,
  year      = {2024},
  address   = {Torino, Italia},
  publisher = {ELRA and ICCL},
  url       = {https://aclanthology.org/2024.lrec-main.750},
  pages     = {8546--8555},
  abstract  = {The Dual Encoders (DE) framework maps image and text inputs into a coordinated representation space, and calculates their similarity directly. On the other hand, the Cross Attention (CA) framework performs modalities interactions after completing the feature embedding of images and text, and then outputs a similarity score. For scenarios with bulk query requests or large query sets, the latter is more accurate, but the former is faster. Therefore, this work finds a new way to improve the retrieval accuracy of the DE framework by borrowing the advantages of the CA framework. Drawing inspiration from image captioning, we introduce a text decoder in the model training stage to simulate the cross-modal interaction function, like the CA framework. The text decoder is eventually discarded, aligning our model with the DE framework. Finally, to ensure training stability and prevent overfitting, we modify the Self-Distillation from Last Mini-Batch and apply it to the retrieval areas. Extensive experiments conducted on the MSCOCO and Flickr30K datasets validate the effectiveness of our proposed methods. Notably, our model achieves competitive results compared to state-of-the-art approaches on the Flickr30K dataset.},
}
<?xml version="1.0" encoding="UTF-8"?>
<!-- MODS v3 (Metadata Object Description Schema) record for the same paper as
     the BibTeX entry above: ge-etal-2024-idc, LREC-COLING 2024, pp. 8546-8555.
     Element whitespace is left untouched; only comments were added. -->
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ge-etal-2024-idc">
<titleInfo>
<title>IDC: Boost Text-to-image Retrieval via Indirect and Direct Connections</title>
</titleInfo>
<!-- Three authors, each a personal name split into given/family parts. -->
<name type="personal">
<namePart type="given">Guowei</namePart>
<namePart type="family">Ge</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kuangrong</namePart>
<namePart type="family">Hao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lingguang</namePart>
<namePart type="family">Hao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<!-- Host item: the LREC-COLING 2024 proceedings, with its six editors,
     publisher, and publication place. -->
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Nicoletta</namePart>
<namePart type="family">Calzolari</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Min-Yen</namePart>
<namePart type="family">Kan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Veronique</namePart>
<namePart type="family">Hoste</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alessandro</namePart>
<namePart type="family">Lenci</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sakriani</namePart>
<namePart type="family">Sakti</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nianwen</namePart>
<namePart type="family">Xue</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>ELRA and ICCL</publisher>
<place>
<placeTerm type="text">Torino, Italia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The Dual Encoders (DE) framework maps image and text inputs into a coordinated representation space, and calculates their similarity directly. On the other hand, the Cross Attention (CA) framework performs modalities interactions after completing the feature embedding of images and text, and then outputs a similarity score. For scenarios with bulk query requests or large query sets, the latter is more accurate, but the former is faster. Therefore, this work finds a new way to improve the retrieval accuracy of the DE framework by borrowing the advantages of the CA framework. Drawing inspiration from image captioning, we introduce a text decoder in the model training stage to simulate the cross-modal interaction function, like the CA framework. The text decoder is eventually discarded, aligning our model with the DE framework. Finally, to ensure training stability and prevent overfitting, we modify the Self-Distillation from Last Mini-Batch and apply it to the retrieval areas. Extensive experiments conducted on the MSCOCO and Flickr30K datasets validate the effectiveness of our proposed methods. Notably, our model achieves competitive results compared to state-of-the-art approaches on the Flickr30K dataset.</abstract>
<identifier type="citekey">ge-etal-2024-idc</identifier>
<location>
<url>https://aclanthology.org/2024.lrec-main.750</url>
</location>
<!-- Pagination: article spans pages 8546-8555 of the proceedings. -->
<part>
<date>2024-05</date>
<extent unit="page">
<start>8546</start>
<end>8555</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T IDC: Boost Text-to-image Retrieval via Indirect and Direct Connections
%A Ge, Guowei
%A Hao, Kuangrong
%A Hao, Lingguang
%Y Calzolari, Nicoletta
%Y Kan, Min-Yen
%Y Hoste, Veronique
%Y Lenci, Alessandro
%Y Sakti, Sakriani
%Y Xue, Nianwen
%S Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)
%D 2024
%8 May
%I ELRA and ICCL
%C Torino, Italia
%F ge-etal-2024-idc
%X The Dual Encoders (DE) framework maps image and text inputs into a coordinated representation space, and calculates their similarity directly. On the other hand, the Cross Attention (CA) framework performs modalities interactions after completing the feature embedding of images and text, and then outputs a similarity score. For scenarios with bulk query requests or large query sets, the latter is more accurate, but the former is faster. Therefore, this work finds a new way to improve the retrieval accuracy of the DE framework by borrowing the advantages of the CA framework. Drawing inspiration from image captioning, we introduce a text decoder in the model training stage to simulate the cross-modal interaction function, like the CA framework. The text decoder is eventually discarded, aligning our model with the DE framework. Finally, to ensure training stability and prevent overfitting, we modify the Self-Distillation from Last Mini-Batch and apply it to the retrieval areas. Extensive experiments conducted on the MSCOCO and Flickr30K datasets validate the effectiveness of our proposed methods. Notably, our model achieves competitive results compared to state-of-the-art approaches on the Flickr30K dataset.
%U https://aclanthology.org/2024.lrec-main.750
%P 8546-8555
Markdown (Informal)
[IDC: Boost Text-to-image Retrieval via Indirect and Direct Connections](https://aclanthology.org/2024.lrec-main.750) (Ge et al., LREC-COLING 2024)
ACL