@inproceedings{elsafoury-2023-thesis,
title = "Thesis Distillation: Investigating The Impact of Bias in {NLP} Models on Hate Speech Detection",
author = "Elsafoury, Fatma",
editor = "Elazar, Yanai and
Ettinger, Allyson and
Kassner, Nora and
Ruder, Sebastian and
Smith, Noah A.",
booktitle = "Proceedings of the Big Picture Workshop",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.bigpicture-1.5",
doi = "10.18653/v1/2023.bigpicture-1.5",
pages = "53--65",
abstract = "This paper is a summary of the work done in my PhD thesis, where I investigate the impact of bias in NLP models on the task of hate speech detection from three perspectives: explainability, offensive stereotyping bias, and fairness. Then, I discuss the main takeaways from my thesis and how they can benefit the broader NLP community. Finally, I discuss important future research directions. The findings of my thesis suggest that bias in NLP models impacts the task of hate speech detection from all three perspectives, and that unless we start incorporating social sciences in studying bias in NLP models, we will not effectively overcome the current limitations of measuring and mitigating bias in NLP models.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="elsafoury-2023-thesis">
<titleInfo>
<title>Thesis Distillation: Investigating The Impact of Bias in NLP Models on Hate Speech Detection</title>
</titleInfo>
<name type="personal">
<namePart type="given">Fatma</namePart>
<namePart type="family">Elsafoury</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Big Picture Workshop</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yanai</namePart>
<namePart type="family">Elazar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Allyson</namePart>
<namePart type="family">Ettinger</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nora</namePart>
<namePart type="family">Kassner</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sebastian</namePart>
<namePart type="family">Ruder</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Noah</namePart>
<namePart type="family">A. Smith</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Singapore</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper is a summary of the work done in my PhD thesis, where I investigate the impact of bias in NLP models on the task of hate speech detection from three perspectives: explainability, offensive stereotyping bias, and fairness. Then, I discuss the main takeaways from my thesis and how they can benefit the broader NLP community. Finally, I discuss important future research directions. The findings of my thesis suggest that bias in NLP models impacts the task of hate speech detection from all three perspectives, and that unless we start incorporating social sciences in studying bias in NLP models, we will not effectively overcome the current limitations of measuring and mitigating bias in NLP models.</abstract>
<identifier type="citekey">elsafoury-2023-thesis</identifier>
<identifier type="doi">10.18653/v1/2023.bigpicture-1.5</identifier>
<location>
<url>https://aclanthology.org/2023.bigpicture-1.5</url>
</location>
<part>
<date>2023-12</date>
<extent unit="page">
<start>53</start>
<end>65</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Thesis Distillation: Investigating The Impact of Bias in NLP Models on Hate Speech Detection
%A Elsafoury, Fatma
%Y Elazar, Yanai
%Y Ettinger, Allyson
%Y Kassner, Nora
%Y Ruder, Sebastian
%Y Smith, Noah A.
%S Proceedings of the Big Picture Workshop
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F elsafoury-2023-thesis
%X This paper is a summary of the work done in my PhD thesis, where I investigate the impact of bias in NLP models on the task of hate speech detection from three perspectives: explainability, offensive stereotyping bias, and fairness. Then, I discuss the main takeaways from my thesis and how they can benefit the broader NLP community. Finally, I discuss important future research directions. The findings of my thesis suggest that bias in NLP models impacts the task of hate speech detection from all three perspectives, and that unless we start incorporating social sciences in studying bias in NLP models, we will not effectively overcome the current limitations of measuring and mitigating bias in NLP models.
%R 10.18653/v1/2023.bigpicture-1.5
%U https://aclanthology.org/2023.bigpicture-1.5
%U https://doi.org/10.18653/v1/2023.bigpicture-1.5
%P 53-65
Markdown (Informal)
[Thesis Distillation: Investigating The Impact of Bias in NLP Models on Hate Speech Detection](https://aclanthology.org/2023.bigpicture-1.5) (Elsafoury, BigPicture 2023)
ACL
[Fatma Elsafoury. 2023.](https://aclanthology.org/2023.bigpicture-1.5) Thesis Distillation: Investigating The Impact of Bias in NLP Models on Hate Speech Detection. In *Proceedings of the Big Picture Workshop*, pages 53–65, Singapore. Association for Computational Linguistics.