@inproceedings{dewulf-2025-evaluating,
  title     = {Evaluating Gender Bias in {Dutch} {NLP}: Insights from {RobBERT}-2023 and the {HONEST} Framework},
  author    = {Dewulf, Marie},
  editor    = {Hackenbuchner, Jani{\c{c}}a and
               Bentivogli, Luisa and
               Daems, Joke and
               Manna, Chiara and
               Savoldi, Beatrice and
               Vanmassenhove, Eva},
  booktitle = {Proceedings of the 3rd Workshop on Gender-Inclusive Translation Technologies ({GITT} 2025)},
  month     = jun,
  year      = {2025},
  address   = {Geneva, Switzerland},
  publisher = {European Association for Machine Translation},
  url       = {https://aclanthology.org/2025.gitt-1.7/},
  pages     = {91--92},
  isbn      = {978-2-9701897-4-9},
  abstract  = {This study investigates gender bias in the Dutch RobBERT-2023 language model using an adapted version of the HONEST framework, which assesses harmful sentence completions. By translating and expanding HONEST templates to include non-binary and gender-neutral language, we systematically evaluate whether RobBERT-2023 exhibits biased or harmful outputs across gender identities. Our findings reveal that while the model{'}s overall bias score is relatively low, non-binary identities are disproportionately affected by derogatory language.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="dewulf-2025-evaluating">
<titleInfo>
<title>Evaluating Gender Bias in Dutch NLP: Insights from RobBERT-2023 and the HONEST Framework</title>
</titleInfo>
<name type="personal">
<namePart type="given">Marie</namePart>
<namePart type="family">Dewulf</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 3rd Workshop on Gender-Inclusive Translation Technologies (GITT 2025)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Janiça</namePart>
<namePart type="family">Hackenbuchner</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Luisa</namePart>
<namePart type="family">Bentivogli</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Joke</namePart>
<namePart type="family">Daems</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chiara</namePart>
<namePart type="family">Manna</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Beatrice</namePart>
<namePart type="family">Savoldi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Eva</namePart>
<namePart type="family">Vanmassenhove</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>European Association for Machine Translation</publisher>
<place>
<placeTerm type="text">Geneva, Switzerland</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">978-2-9701897-4-9</identifier>
</relatedItem>
<abstract>This study investigates gender bias in the Dutch RobBERT-2023 language model using an adapted version of the HONEST framework, which assesses harmful sentence completions. By translating and expanding HONEST templates to include non-binary and gender-neutral language, we systematically evaluate whether RobBERT-2023 exhibits biased or harmful outputs across gender identities. Our findings reveal that while the model’s overall bias score is relatively low, non-binary identities are disproportionately affected by derogatory language.</abstract>
<identifier type="citekey">dewulf-2025-evaluating</identifier>
<location>
<url>https://aclanthology.org/2025.gitt-1.7/</url>
</location>
<part>
<date>2025-06</date>
<extent unit="page">
<start>91</start>
<end>92</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Evaluating Gender Bias in Dutch NLP: Insights from RobBERT-2023 and the HONEST Framework
%A Dewulf, Marie
%Y Hackenbuchner, Janiça
%Y Bentivogli, Luisa
%Y Daems, Joke
%Y Manna, Chiara
%Y Savoldi, Beatrice
%Y Vanmassenhove, Eva
%S Proceedings of the 3rd Workshop on Gender-Inclusive Translation Technologies (GITT 2025)
%D 2025
%8 June
%I European Association for Machine Translation
%C Geneva, Switzerland
%@ 978-2-9701897-4-9
%F dewulf-2025-evaluating
%X This study investigates gender bias in the Dutch RobBERT-2023 language model using an adapted version of the HONEST framework, which assesses harmful sentence completions. By translating and expanding HONEST templates to include non-binary and gender-neutral language, we systematically evaluate whether RobBERT-2023 exhibits biased or harmful outputs across gender identities. Our findings reveal that while the model’s overall bias score is relatively low, non-binary identities are disproportionately affected by derogatory language.
%U https://aclanthology.org/2025.gitt-1.7/
%P 91-92
Markdown (Informal)
[Evaluating Gender Bias in Dutch NLP: Insights from RobBERT-2023 and the HONEST Framework](https://aclanthology.org/2025.gitt-1.7/) (Dewulf, GITT 2025)
ACL