@inproceedings{kumar-etal-2025-mitigating,
title = "Mitigating Gender Bias in Job Ranking Systems Using Job Advertisement Neutrality",
author = "Kumar, Deepak and
Masoudian, Shahed and
Melchiorre, Alessandro B. and
Schedl, Markus",
editor = "Atwell, Katherine and
Biester, Laura and
Borah, Angana and
Dementieva, Daryna and
Ignat, Oana and
Kotonya, Neema and
Liu, Ziyi and
Wan, Ruyuan and
Wilson, Steven and
Zhao, Jieyu",
booktitle = "Proceedings of the Fourth Workshop on NLP for Positive Impact (NLP4PI)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.nlp4pi-1.23/",
doi = "10.18653/v1/2025.nlp4pi-1.23",
pages = "264--271",
ISBN = "978-1-959429-19-7",
abstract = "Transformer-based Job Ranking Systems (JRSs) are vulnerable to societal biases inherited in unbalanced datasets. These biases often manifest as unjust job rankings, particularly disadvantaging candidates of different genders. Most bias mitigation techniques leverage candidates' gender and align gender distributions within the embeddings of JRSs to mitigate bias. While such methods effectively align distributional properties and make JRSs agnostic to gender, they frequently fall short in addressing empirical fairness metrics, such as the performance gap across genders. In this study, we shift our attention from candidate gender to mitigate bias based on gendered language in job advertisements. We propose a novel neutrality score based on automatically discovered biased words in job ads and use it to re-rank the model{'}s decisions. We evaluate our method by comparing it with different bias mitigation strategies and empirically demonstrate that our proposed method not only improves fairness but can also enhance the model{'}s performance."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kumar-etal-2025-mitigating">
<titleInfo>
<title>Mitigating Gender Bias in Job Ranking Systems Using Job Advertisement Neutrality</title>
</titleInfo>
<name type="personal">
<namePart type="given">Deepak</namePart>
<namePart type="family">Kumar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shahed</namePart>
<namePart type="family">Masoudian</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alessandro</namePart>
<namePart type="given">B</namePart>
<namePart type="family">Melchiorre</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Markus</namePart>
<namePart type="family">Schedl</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Fourth Workshop on NLP for Positive Impact (NLP4PI)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Katherine</namePart>
<namePart type="family">Atwell</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Laura</namePart>
<namePart type="family">Biester</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Angana</namePart>
<namePart type="family">Borah</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Daryna</namePart>
<namePart type="family">Dementieva</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Oana</namePart>
<namePart type="family">Ignat</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Neema</namePart>
<namePart type="family">Kotonya</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ziyi</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ruyuan</namePart>
<namePart type="family">Wan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Steven</namePart>
<namePart type="family">Wilson</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jieyu</namePart>
<namePart type="family">Zhao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">978-1-959429-19-7</identifier>
</relatedItem>
<abstract>Transformer-based Job Ranking Systems (JRSs) are vulnerable to societal biases inherited in unbalanced datasets. These biases often manifest as unjust job rankings, particularly disadvantaging candidates of different genders. Most bias mitigation techniques leverage candidates’ gender and align gender distributions within the embeddings of JRSs to mitigate bias. While such methods effectively align distributional properties and make JRSs agnostic to gender, they frequently fall short in addressing empirical fairness metrics, such as the performance gap across genders. In this study, we shift our attention from candidate gender to mitigate bias based on gendered language in job advertisements. We propose a novel neutrality score based on automatically discovered biased words in job ads and use it to re-rank the model’s decisions. We evaluate our method by comparing it with different bias mitigation strategies and empirically demonstrate that our proposed method not only improves fairness but can also enhance the model’s performance.</abstract>
<identifier type="citekey">kumar-etal-2025-mitigating</identifier>
<identifier type="doi">10.18653/v1/2025.nlp4pi-1.23</identifier>
<location>
<url>https://aclanthology.org/2025.nlp4pi-1.23/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>264</start>
<end>271</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Mitigating Gender Bias in Job Ranking Systems Using Job Advertisement Neutrality
%A Kumar, Deepak
%A Masoudian, Shahed
%A Melchiorre, Alessandro B.
%A Schedl, Markus
%Y Atwell, Katherine
%Y Biester, Laura
%Y Borah, Angana
%Y Dementieva, Daryna
%Y Ignat, Oana
%Y Kotonya, Neema
%Y Liu, Ziyi
%Y Wan, Ruyuan
%Y Wilson, Steven
%Y Zhao, Jieyu
%S Proceedings of the Fourth Workshop on NLP for Positive Impact (NLP4PI)
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 978-1-959429-19-7
%F kumar-etal-2025-mitigating
%X Transformer-based Job Ranking Systems (JRSs) are vulnerable to societal biases inherited in unbalanced datasets. These biases often manifest as unjust job rankings, particularly disadvantaging candidates of different genders. Most bias mitigation techniques leverage candidates’ gender and align gender distributions within the embeddings of JRSs to mitigate bias. While such methods effectively align distributional properties and make JRSs agnostic to gender, they frequently fall short in addressing empirical fairness metrics, such as the performance gap across genders. In this study, we shift our attention from candidate gender to mitigate bias based on gendered language in job advertisements. We propose a novel neutrality score based on automatically discovered biased words in job ads and use it to re-rank the model’s decisions. We evaluate our method by comparing it with different bias mitigation strategies and empirically demonstrate that our proposed method not only improves fairness but can also enhance the model’s performance.
%R 10.18653/v1/2025.nlp4pi-1.23
%U https://aclanthology.org/2025.nlp4pi-1.23/
%U https://doi.org/10.18653/v1/2025.nlp4pi-1.23
%P 264-271
Markdown (Informal)
[Mitigating Gender Bias in Job Ranking Systems Using Job Advertisement Neutrality](https://aclanthology.org/2025.nlp4pi-1.23/) (Kumar et al., NLP4PI 2025)

ACL
Deepak Kumar, Shahed Masoudian, Alessandro B. Melchiorre, and Markus Schedl. 2025. Mitigating Gender Bias in Job Ranking Systems Using Job Advertisement Neutrality. In Proceedings of the Fourth Workshop on NLP for Positive Impact (NLP4PI), pages 264–271, Vienna, Austria. Association for Computational Linguistics.
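
The abstract describes the approach only at a high level: compute a neutrality score for each job ad from automatically discovered biased words, then use that score to re-rank the model's decisions. The following is a minimal illustrative sketch of that general idea, not the authors' formulation: it assumes a fixed word list (the paper discovers biased words automatically), a token-fraction neutrality score, and a hypothetical blend weight `alpha`; the paper's actual scoring and re-ranking may differ.

```python
# Illustrative sketch only. GENDERED_WORDS, neutrality_score, and the
# relevance/neutrality blend with `alpha` are simplified stand-ins for the
# paper's automatically discovered biased words and its re-ranking method.
from dataclasses import dataclass

# Hypothetical gendered-word list; the paper derives such words automatically.
GENDERED_WORDS = {"ninja", "rockstar", "dominant", "nurturing", "supportive"}


@dataclass
class Job:
    job_id: str
    ad_text: str
    relevance: float  # score assigned by the base job ranking model


def neutrality_score(ad_text: str) -> float:
    """Fraction of tokens in the ad that are NOT in the gendered-word list."""
    tokens = ad_text.lower().split()
    if not tokens:
        return 1.0
    biased = sum(token in GENDERED_WORDS for token in tokens)
    return 1.0 - biased / len(tokens)


def rerank(jobs: list[Job], alpha: float = 0.5) -> list[Job]:
    """Re-rank jobs by a convex blend of model relevance and ad neutrality."""
    return sorted(
        jobs,
        key=lambda j: alpha * j.relevance
        + (1.0 - alpha) * neutrality_score(j.ad_text),
        reverse=True,
    )


if __name__ == "__main__":
    jobs = [
        Job("j1", "seeking a dominant rockstar engineer", relevance=0.92),
        Job("j2", "seeking a software engineer with Python experience", relevance=0.88),
    ]
    for job in rerank(jobs):
        print(job.job_id, round(neutrality_score(job.ad_text), 3))
```

Under this toy scoring, the more neutrally worded ad can overtake a slightly more "relevant" but gendered ad, which mirrors the effect the abstract reports: re-ranking by neutrality can improve fairness without necessarily hurting performance.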