@inproceedings{oliveira-dos-santos-etal-2021-cider,
title = "{CIDEr}-{R}: Robust Consensus-based Image Description Evaluation",
author = "Oliveira dos Santos, Gabriel and
Colombini, Esther Luna and
Avila, Sandra",
booktitle = "Proceedings of the Seventh Workshop on Noisy User-generated Text (W-NUT 2021)",
month = nov,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.wnut-1.39",
doi = "10.18653/v1/2021.wnut-1.39",
pages = "351--360",
abstract = "This paper shows that CIDEr-D, a traditional evaluation metric for image description, does not work properly on datasets where the number of words in the sentence is significantly greater than those in the MS COCO Captions dataset. We also show that CIDEr-D has performance hampered by the lack of multiple reference sentences and high variance of sentence length. To bypass this problem, we introduce CIDEr-R, which improves CIDEr-D, making it more flexible in dealing with datasets with high sentence length variance. We demonstrate that CIDEr-R is more accurate and closer to human judgment than CIDEr-D; CIDEr-R is more robust regarding the number of available references. Our results reveal that using Self-Critical Sequence Training to optimize CIDEr-R generates descriptive captions. In contrast, when CIDEr-D is optimized, the generated captions{'} length tends to be similar to the reference length. However, the models also repeat several times the same word to increase the sentence length.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="oliveira-dos-santos-etal-2021-cider">
<titleInfo>
<title>CIDEr-R: Robust Consensus-based Image Description Evaluation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Gabriel</namePart>
<namePart type="family">Oliveira dos Santos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Esther</namePart>
<namePart type="given">Luna</namePart>
<namePart type="family">Colombini</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sandra</namePart>
<namePart type="family">Avila</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Seventh Workshop on Noisy User-generated Text (W-NUT 2021)</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper shows that CIDEr-D, a traditional evaluation metric for image description, does not work properly on datasets where the number of words in the sentence is significantly greater than those in the MS COCO Captions dataset. We also show that CIDEr-D has performance hampered by the lack of multiple reference sentences and high variance of sentence length. To bypass this problem, we introduce CIDEr-R, which improves CIDEr-D, making it more flexible in dealing with datasets with high sentence length variance. We demonstrate that CIDEr-R is more accurate and closer to human judgment than CIDEr-D; CIDEr-R is more robust regarding the number of available references. Our results reveal that using Self-Critical Sequence Training to optimize CIDEr-R generates descriptive captions. In contrast, when CIDEr-D is optimized, the generated captions’ length tends to be similar to the reference length. However, the models also repeat several times the same word to increase the sentence length.</abstract>
<identifier type="citekey">oliveira-dos-santos-etal-2021-cider</identifier>
<identifier type="doi">10.18653/v1/2021.wnut-1.39</identifier>
<location>
<url>https://aclanthology.org/2021.wnut-1.39</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>351</start>
<end>360</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T CIDEr-R: Robust Consensus-based Image Description Evaluation
%A Oliveira dos Santos, Gabriel
%A Colombini, Esther Luna
%A Avila, Sandra
%S Proceedings of the Seventh Workshop on Noisy User-generated Text (W-NUT 2021)
%D 2021
%8 November
%I Association for Computational Linguistics
%C Online
%F oliveira-dos-santos-etal-2021-cider
%X This paper shows that CIDEr-D, a traditional evaluation metric for image description, does not work properly on datasets where the number of words in the sentence is significantly greater than those in the MS COCO Captions dataset. We also show that CIDEr-D has performance hampered by the lack of multiple reference sentences and high variance of sentence length. To bypass this problem, we introduce CIDEr-R, which improves CIDEr-D, making it more flexible in dealing with datasets with high sentence length variance. We demonstrate that CIDEr-R is more accurate and closer to human judgment than CIDEr-D; CIDEr-R is more robust regarding the number of available references. Our results reveal that using Self-Critical Sequence Training to optimize CIDEr-R generates descriptive captions. In contrast, when CIDEr-D is optimized, the generated captions’ length tends to be similar to the reference length. However, the models also repeat several times the same word to increase the sentence length.
%R 10.18653/v1/2021.wnut-1.39
%U https://aclanthology.org/2021.wnut-1.39
%U https://doi.org/10.18653/v1/2021.wnut-1.39
%P 351-360
Markdown (Informal)
[CIDEr-R: Robust Consensus-based Image Description Evaluation](https://aclanthology.org/2021.wnut-1.39) (Oliveira dos Santos et al., WNUT 2021)
ACL