@inproceedings{askarian-etal-2021-curriculum,
title = "Curriculum Learning Effectively Improves Low Data {VQA}",
author = "Askarian, Narjes and
Abbasnejad, Ehsan and
Zukerman, Ingrid and
Buntine, Wray and
Haffari, Gholamreza",
editor = "Rahimi, Afshin and
Lane, William and
Zuccon, Guido",
booktitle = "Proceedings of the 19th Annual Workshop of the Australasian Language Technology Association",
month = dec,
year = "2021",
address = "Online",
publisher = "Australasian Language Technology Association",
url = "https://aclanthology.org/2021.alta-1.3",
pages = "22--33",
abstract = "Visual question answering (VQA) models, in particular modular ones, are commonly trained on large-scale datasets to achieve state-of-the-art performance. However, such datasets are sometimes not available. Further, it has been shown that training these models on small datasets significantly reduces their accuracy. In this paper, we propose a curriculum-based learning (CL) regime to increase the accuracy of VQA models trained on small datasets. Specifically, we offer three criteria to rank the samples in these datasets and propose a training strategy for each criterion. Our results show that, for small datasets, our CL approach yields more accurate results than those obtained when training with no curriculum.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="askarian-etal-2021-curriculum">
<titleInfo>
<title>Curriculum Learning Effectively Improves Low Data VQA</title>
</titleInfo>
<name type="personal">
<namePart type="given">Narjes</namePart>
<namePart type="family">Askarian</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ehsan</namePart>
<namePart type="family">Abbasnejad</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ingrid</namePart>
<namePart type="family">Zukerman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wray</namePart>
<namePart type="family">Buntine</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Gholamreza</namePart>
<namePart type="family">Haffari</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 19th Annual Workshop of the Australasian Language Technology Association</title>
</titleInfo>
<name type="personal">
<namePart type="given">Afshin</namePart>
<namePart type="family">Rahimi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">William</namePart>
<namePart type="family">Lane</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Guido</namePart>
<namePart type="family">Zuccon</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Australasian Language Technology Association</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Visual question answering (VQA) models, in particular modular ones, are commonly trained on large-scale datasets to achieve state-of-the-art performance. However, such datasets are sometimes not available. Further, it has been shown that training these models on small datasets significantly reduces their accuracy. In this paper, we propose a curriculum-based learning (CL) regime to increase the accuracy of VQA models trained on small datasets. Specifically, we offer three criteria to rank the samples in these datasets and propose a training strategy for each criterion. Our results show that, for small datasets, our CL approach yields more accurate results than those obtained when training with no curriculum.</abstract>
<identifier type="citekey">askarian-etal-2021-curriculum</identifier>
<location>
<url>https://aclanthology.org/2021.alta-1.3</url>
</location>
<part>
<date>2021-12</date>
<extent unit="page">
<start>22</start>
<end>33</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Curriculum Learning Effectively Improves Low Data VQA
%A Askarian, Narjes
%A Abbasnejad, Ehsan
%A Zukerman, Ingrid
%A Buntine, Wray
%A Haffari, Gholamreza
%Y Rahimi, Afshin
%Y Lane, William
%Y Zuccon, Guido
%S Proceedings of the 19th Annual Workshop of the Australasian Language Technology Association
%D 2021
%8 December
%I Australasian Language Technology Association
%C Online
%F askarian-etal-2021-curriculum
%X Visual question answering (VQA) models, in particular modular ones, are commonly trained on large-scale datasets to achieve state-of-the-art performance. However, such datasets are sometimes not available. Further, it has been shown that training these models on small datasets significantly reduces their accuracy. In this paper, we propose a curriculum-based learning (CL) regime to increase the accuracy of VQA models trained on small datasets. Specifically, we offer three criteria to rank the samples in these datasets and propose a training strategy for each criterion. Our results show that, for small datasets, our CL approach yields more accurate results than those obtained when training with no curriculum.
%U https://aclanthology.org/2021.alta-1.3
%P 22-33
Markdown (Informal)
[Curriculum Learning Effectively Improves Low Data VQA](https://aclanthology.org/2021.alta-1.3) (Askarian et al., ALTA 2021)
ACL
- Narjes Askarian, Ehsan Abbasnejad, Ingrid Zukerman, Wray Buntine, and Gholamreza Haffari. 2021. Curriculum Learning Effectively Improves Low Data VQA. In Proceedings of the 19th Annual Workshop of the Australasian Language Technology Association, pages 22–33, Online. Australasian Language Technology Association.