@inproceedings{bhargava-etal-2021-generalization,
  title     = {Generalization in {NLI}: Ways (Not) To Go Beyond Simple Heuristics},
  author    = {Bhargava, Prajjwal and Drozd, Aleksandr and Rogers, Anna},
  booktitle = {Proceedings of the Second Workshop on Insights from Negative Results in NLP},
  month     = nov,
  year      = {2021},
  address   = {Online and Punta Cana, Dominican Republic},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2021.insights-1.18},
  doi       = {10.18653/v1/2021.insights-1.18},
  pages     = {125--135},
  abstract  = {Much of recent progress in NLU was shown to be due to models{'} learning dataset-specific heuristics. We conduct a case study of generalization in NLI (from MNLI to the adversarially constructed HANS dataset) in a range of BERT-based architectures (adapters, Siamese Transformers, HEX debiasing), as well as with subsampling the data and increasing the model size. We report 2 successful and 3 unsuccessful strategies, all providing insights into how Transformer-based models learn to generalize.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="bhargava-etal-2021-generalization">
<titleInfo>
<title>Generalization in NLI: Ways (Not) To Go Beyond Simple Heuristics</title>
</titleInfo>
<name type="personal">
<namePart type="given">Prajjwal</namePart>
<namePart type="family">Bhargava</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aleksandr</namePart>
<namePart type="family">Drozd</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="family">Rogers</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Second Workshop on Insights from Negative Results in NLP</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online and Punta Cana, Dominican Republic</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Much of recent progress in NLU was shown to be due to models’ learning dataset-specific heuristics. We conduct a case study of generalization in NLI (from MNLI to the adversarially constructed HANS dataset) in a range of BERT-based architectures (adapters, Siamese Transformers, HEX debiasing), as well as with subsampling the data and increasing the model size. We report 2 successful and 3 unsuccessful strategies, all providing insights into how Transformer-based models learn to generalize.</abstract>
<identifier type="citekey">bhargava-etal-2021-generalization</identifier>
<identifier type="doi">10.18653/v1/2021.insights-1.18</identifier>
<location>
<url>https://aclanthology.org/2021.insights-1.18</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>125</start>
<end>135</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Generalization in NLI: Ways (Not) To Go Beyond Simple Heuristics
%A Bhargava, Prajjwal
%A Drozd, Aleksandr
%A Rogers, Anna
%S Proceedings of the Second Workshop on Insights from Negative Results in NLP
%D 2021
%8 November
%I Association for Computational Linguistics
%C Online and Punta Cana, Dominican Republic
%F bhargava-etal-2021-generalization
%X Much of recent progress in NLU was shown to be due to models’ learning dataset-specific heuristics. We conduct a case study of generalization in NLI (from MNLI to the adversarially constructed HANS dataset) in a range of BERT-based architectures (adapters, Siamese Transformers, HEX debiasing), as well as with subsampling the data and increasing the model size. We report 2 successful and 3 unsuccessful strategies, all providing insights into how Transformer-based models learn to generalize.
%R 10.18653/v1/2021.insights-1.18
%U https://aclanthology.org/2021.insights-1.18
%U https://doi.org/10.18653/v1/2021.insights-1.18
%P 125-135
Markdown (Informal)
[Generalization in NLI: Ways (Not) To Go Beyond Simple Heuristics](https://aclanthology.org/2021.insights-1.18) (Bhargava et al., insights 2021)
ACL