BibTeX
@inproceedings{ravichander-etal-2023-bias,
    title = "When and Why Does Bias Mitigation Work?",
    author = "Ravichander, Abhilasha  and
      Stacey, Joe  and
      Rei, Marek",
    editor = "Bouamor, Houda  and
      Pino, Juan  and
      Bali, Kalika",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
    month = dec,
    year = "2023",
    address = "Singapore",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.findings-emnlp.619",
    doi = "10.18653/v1/2023.findings-emnlp.619",
    pages = "9233--9247",
    abstract = "Neural models have been shown to exploit shallow surface features to perform language understanding tasks, rather than learning the deeper language understanding and reasoning skills that practitioners desire. Previous work has developed debiasing techniques to pressure models away from spurious features or artifacts in datasets, with the goal of having models instead learn useful, task-relevant representations. However, what do models actually learn as a result of such debiasing procedures? In this work, we evaluate three model debiasing strategies, and through a set of carefully designed tests we show how debiasing can actually increase the model{'}s reliance on hidden biases, instead of learning robust features that help it solve a task. Further, we demonstrate how even debiasing models against all shallow features in a dataset may still not help models address NLP tasks. As a result, we suggest that debiasing existing models may not be sufficient for many language understanding tasks, and future work should consider new learning paradigms, to address complex challenges such as commonsense reasoning and inference.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ravichander-etal-2023-bias">
    <titleInfo>
        <title>When and Why Does Bias Mitigation Work?</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Abhilasha</namePart>
        <namePart type="family">Ravichander</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Joe</namePart>
        <namePart type="family">Stacey</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Marek</namePart>
        <namePart type="family">Rei</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2023-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Findings of the Association for Computational Linguistics: EMNLP 2023</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Houda</namePart>
            <namePart type="family">Bouamor</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Juan</namePart>
            <namePart type="family">Pino</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Kalika</namePart>
            <namePart type="family">Bali</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Singapore</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Neural models have been shown to exploit shallow surface features to perform language understanding tasks, rather than learning the deeper language understanding and reasoning skills that practitioners desire. Previous work has developed debiasing techniques to pressure models away from spurious features or artifacts in datasets, with the goal of having models instead learn useful, task-relevant representations. However, what do models actually learn as a result of such debiasing procedures? In this work, we evaluate three model debiasing strategies, and through a set of carefully designed tests we show how debiasing can actually increase the model’s reliance on hidden biases, instead of learning robust features that help it solve a task. Further, we demonstrate how even debiasing models against all shallow features in a dataset may still not help models address NLP tasks. As a result, we suggest that debiasing existing models may not be sufficient for many language understanding tasks, and future work should consider new learning paradigms, to address complex challenges such as commonsense reasoning and inference.</abstract>
    <identifier type="citekey">ravichander-etal-2023-bias</identifier>
    <identifier type="doi">10.18653/v1/2023.findings-emnlp.619</identifier>
    <location>
        <url>https://aclanthology.org/2023.findings-emnlp.619</url>
    </location>
    <part>
        <date>2023-12</date>
        <extent unit="page">
            <start>9233</start>
            <end>9247</end>
        </extent>
    </part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T When and Why Does Bias Mitigation Work?
%A Ravichander, Abhilasha
%A Stacey, Joe
%A Rei, Marek
%Y Bouamor, Houda
%Y Pino, Juan
%Y Bali, Kalika
%S Findings of the Association for Computational Linguistics: EMNLP 2023
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F ravichander-etal-2023-bias
%X Neural models have been shown to exploit shallow surface features to perform language understanding tasks, rather than learning the deeper language understanding and reasoning skills that practitioners desire. Previous work has developed debiasing techniques to pressure models away from spurious features or artifacts in datasets, with the goal of having models instead learn useful, task-relevant representations. However, what do models actually learn as a result of such debiasing procedures? In this work, we evaluate three model debiasing strategies, and through a set of carefully designed tests we show how debiasing can actually increase the model’s reliance on hidden biases, instead of learning robust features that help it solve a task. Further, we demonstrate how even debiasing models against all shallow features in a dataset may still not help models address NLP tasks. As a result, we suggest that debiasing existing models may not be sufficient for many language understanding tasks, and future work should consider new learning paradigms, to address complex challenges such as commonsense reasoning and inference.
%R 10.18653/v1/2023.findings-emnlp.619
%U https://aclanthology.org/2023.findings-emnlp.619
%U https://doi.org/10.18653/v1/2023.findings-emnlp.619
%P 9233-9247
Markdown (Informal)
[When and Why Does Bias Mitigation Work?](https://aclanthology.org/2023.findings-emnlp.619) (Ravichander et al., Findings 2023)
ACL
- Abhilasha Ravichander, Joe Stacey, and Marek Rei. 2023. When and Why Does Bias Mitigation Work? In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 9233–9247, Singapore. Association for Computational Linguistics.