BibTeX:
@inproceedings{dopierre-etal-2021-neural,
title = "A Neural Few-Shot Text Classification Reality Check",
author = "Dopierre, Thomas and
Gravier, Christophe and
Logerais, Wilfried",
editor = "Merlo, Paola and
Tiedemann, J{\"o}rg and
Tsarfaty, Reut",
booktitle = "Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume",
month = apr,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.eacl-main.79",
doi = "10.18653/v1/2021.eacl-main.79",
pages = "935--943",
abstract = "Modern classification models tend to struggle when the amount of annotated data is scarce. To overcome this issue, several neural few-shot classification models have emerged, yielding significant progress over time, both in Computer Vision and Natural Language Processing. In the latter, such models used to rely on fixed word embeddings before the advent of transformers. Additionally, some models used in Computer Vision are yet to be tested in NLP applications. In this paper, we compare all these models, first adapting those made in the field of image processing to NLP, and second providing them access to transformers. We then test these models, equipped with the same transformer-based encoder, on the intent detection task, known for having a large number of classes. Our results reveal that while methods perform almost equally well on the ARSC dataset, this is not the case for the intent detection task, where the most recent and supposedly best competitors perform worse than older and simpler ones (while all are given access to transformers). We also show that a simple baseline is surprisingly strong. All the newly developed models, as well as the evaluation framework, are made publicly available.",
}
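
For reference, a minimal sketch of reading this BibTeX record programmatically, assuming the third-party bibtexparser package (v1 API) is installed and the record above is saved as references.bib; both the package choice and the filename are illustrative assumptions, not part of the record itself:

import bibtexparser  # pip install bibtexparser (v1 API assumed)

# Load the .bib file into a BibDatabase; each entry becomes a plain dict
# keyed by field name, with the citekey under "ID".
with open("references.bib") as f:
    db = bibtexparser.load(f)

entry = db.entries[0]
print(entry["ID"])     # dopierre-etal-2021-neural
print(entry["title"])  # A Neural Few-Shot Text Classification Reality Check
print(entry["pages"])  # 935--943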
MODS XML:
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="dopierre-etal-2021-neural">
<titleInfo>
<title>A Neural Few-Shot Text Classification Reality Check</title>
</titleInfo>
<name type="personal">
<namePart type="given">Thomas</namePart>
<namePart type="family">Dopierre</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Christophe</namePart>
<namePart type="family">Gravier</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wilfried</namePart>
<namePart type="family">Logerais</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-04</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume</title>
</titleInfo>
<name type="personal">
<namePart type="given">Paola</namePart>
<namePart type="family">Merlo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jorg</namePart>
<namePart type="family">Tiedemann</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Reut</namePart>
<namePart type="family">Tsarfaty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Modern classification models tend to struggle when the amount of annotated data is scarce. To overcome this issue, several neural few-shot classification models have emerged, yielding significant progress over time, both in Computer Vision and Natural Language Processing. In the latter, such models used to rely on fixed word embeddings before the advent of transformers. Additionally, some models used in Computer Vision are yet to be tested in NLP applications. In this paper, we compare all these models, first adapting those made in the field of image processing to NLP, and second providing them access to transformers. We then test these models, equipped with the same transformer-based encoder, on the intent detection task, known for having a large number of classes. Our results reveal that while methods perform almost equally well on the ARSC dataset, this is not the case for the intent detection task, where the most recent and supposedly best competitors perform worse than older and simpler ones (while all are given access to transformers). We also show that a simple baseline is surprisingly strong. All the newly developed models, as well as the evaluation framework, are made publicly available.</abstract>
<identifier type="citekey">dopierre-etal-2021-neural</identifier>
<identifier type="doi">10.18653/v1/2021.eacl-main.79</identifier>
<location>
<url>https://aclanthology.org/2021.eacl-main.79</url>
</location>
<part>
<date>2021-04</date>
<extent unit="page">
<start>935</start>
<end>943</end>
</extent>
</part>
</mods>
</modsCollection>
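
The MODS record can be read with Python's standard library alone; a minimal sketch, assuming the XML above is saved as mods.xml (an illustrative filename), and noting that every element lives in the http://www.loc.gov/mods/v3 namespace, which must be passed explicitly:

import xml.etree.ElementTree as ET

NS = {"m": "http://www.loc.gov/mods/v3"}
mods = ET.parse("mods.xml").getroot().find("m:mods", NS)

# Title of the cited paper.
title = mods.find("m:titleInfo/m:title", NS).text

# Direct-child <name> elements are the paper's authors; the editors sit
# deeper, inside <relatedItem>, so they are not matched here.
authors = [
    " ".join(part.text for part in name.findall("m:namePart", NS))
    for name in mods.findall("m:name", NS)
]
print(title)    # A Neural Few-Shot Text Classification Reality Check
print(authors)  # ['Thomas Dopierre', 'Christophe Gravier', 'Wilfried Logerais']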
Endnote:
%0 Conference Proceedings
%T A Neural Few-Shot Text Classification Reality Check
%A Dopierre, Thomas
%A Gravier, Christophe
%A Logerais, Wilfried
%Y Merlo, Paola
%Y Tiedemann, Jörg
%Y Tsarfaty, Reut
%S Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume
%D 2021
%8 April
%I Association for Computational Linguistics
%C Online
%F dopierre-etal-2021-neural
%X Modern classification models tend to struggle when the amount of annotated data is scarce. To overcome this issue, several neural few-shot classification models have emerged, yielding significant progress over time, both in Computer Vision and Natural Language Processing. In the latter, such models used to rely on fixed word embeddings before the advent of transformers. Additionally, some models used in Computer Vision are yet to be tested in NLP applications. In this paper, we compare all these models, first adapting those made in the field of image processing to NLP, and second providing them access to transformers. We then test these models, equipped with the same transformer-based encoder, on the intent detection task, known for having a large number of classes. Our results reveal that while methods perform almost equally well on the ARSC dataset, this is not the case for the intent detection task, where the most recent and supposedly best competitors perform worse than older and simpler ones (while all are given access to transformers). We also show that a simple baseline is surprisingly strong. All the newly developed models, as well as the evaluation framework, are made publicly available.
%R 10.18653/v1/2021.eacl-main.79
%U https://aclanthology.org/2021.eacl-main.79
%U https://doi.org/10.18653/v1/2021.eacl-main.79
%P 935-943
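
The Endnote (refer) tags above are line-oriented and simple enough to parse by hand; a minimal standard-library sketch, assuming the block is saved as record.enw (an illustrative filename):

fields = {}
with open("record.enw") as f:
    for line in f:
        if line.startswith("%"):
            # Each line is "%<tag> <value>"; repeated tags (e.g. %A) accumulate.
            tag, _, value = line.rstrip("\n").partition(" ")
            fields.setdefault(tag, []).append(value)

print(fields["%T"][0])  # A Neural Few-Shot Text Classification Reality Check
print(fields["%A"])     # ['Dopierre, Thomas', 'Gravier, Christophe', 'Logerais, Wilfried']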
Markdown (Informal):
[A Neural Few-Shot Text Classification Reality Check](https://aclanthology.org/2021.eacl-main.79) (Dopierre et al., EACL 2021)

ACL:
Thomas Dopierre, Christophe Gravier, and Wilfried Logerais. 2021. A Neural Few-Shot Text Classification Reality Check. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume, pages 935–943, Online. Association for Computational Linguistics.