@inproceedings{schwertmann-etal-2023-model,
title = "Model-Agnostic Bias Measurement in Link Prediction",
author = "Schwertmann, Lena and
Kannan Ravi, Manoj Prabhakar and
de Melo, Gerard",
editor = "Vlachos, Andreas and
Augenstein, Isabelle",
booktitle = "Findings of the Association for Computational Linguistics: EACL 2023",
month = may,
year = "2023",
address = "Dubrovnik, Croatia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.findings-eacl.121",
doi = "10.18653/v1/2023.findings-eacl.121",
pages = "1632--1648",
abstract = "Link prediction models based on factual knowledge graphs are commonly used in applications such as search and question answering. However, work investigating social bias in these models has been limited. Previous work focused on knowledge graph embeddings, so more recent classes of models achieving superior results by fine-tuning Transformers have not yet been investigated. We therefore present a model-agnostic approach for bias measurement leveraging fairness metrics to compare bias in knowledge graph embedding-based predictions (KG only) with models that use pre-trained, Transformer-based language models (KG+LM). We further create a dataset to measure gender bias in occupation predictions and assess whether the KG+LM models are more or less biased than KG only models. We find that gender bias tends to be higher for the KG+LM models and analyze potential connections to the accuracy of the models and the data bias inherent in our dataset. Finally, we discuss the limitations and ethical considerations of our work. The repository containing the source code and the data set is publicly available at \url{https://github.com/lena-schwert/comparing-bias-in-KG-models}.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="schwertmann-etal-2023-model">
<titleInfo>
<title>Model-Agnostic Bias Measurement in Link Prediction</title>
</titleInfo>
<name type="personal">
<namePart type="given">Lena</namePart>
<namePart type="family">Schwertmann</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Manoj</namePart>
<namePart type="given">Prabhakar</namePart>
<namePart type="family">Kannan Ravi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Gerard</namePart>
<namePart type="family">de Melo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EACL 2023</title>
</titleInfo>
<name type="personal">
<namePart type="given">Andreas</namePart>
<namePart type="family">Vlachos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Isabelle</namePart>
<namePart type="family">Augenstein</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dubrovnik, Croatia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Link prediction models based on factual knowledge graphs are commonly used in applications such as search and question answering. However, work investigating social bias in these models has been limited. Previous work focused on knowledge graph embeddings, so more recent classes of models achieving superior results by fine-tuning Transformers have not yet been investigated. We therefore present a model-agnostic approach for bias measurement leveraging fairness metrics to compare bias in knowledge graph embedding-based predictions (KG only) with models that use pre-trained, Transformer-based language models (KG+LM). We further create a dataset to measure gender bias in occupation predictions and assess whether the KG+LM models are more or less biased than KG only models. We find that gender bias tends to be higher for the KG+LM models and analyze potential connections to the accuracy of the models and the data bias inherent in our dataset. Finally, we discuss the limitations and ethical considerations of our work. The repository containing the source code and the data set is publicly available at https://github.com/lena-schwert/comparing-bias-in-KG-models.</abstract>
<identifier type="citekey">schwertmann-etal-2023-model</identifier>
<identifier type="doi">10.18653/v1/2023.findings-eacl.121</identifier>
<location>
<url>https://aclanthology.org/2023.findings-eacl.121</url>
</location>
<part>
<date>2023-05</date>
<extent unit="page">
<start>1632</start>
<end>1648</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Model-Agnostic Bias Measurement in Link Prediction
%A Schwertmann, Lena
%A Kannan Ravi, Manoj Prabhakar
%A de Melo, Gerard
%Y Vlachos, Andreas
%Y Augenstein, Isabelle
%S Findings of the Association for Computational Linguistics: EACL 2023
%D 2023
%8 May
%I Association for Computational Linguistics
%C Dubrovnik, Croatia
%F schwertmann-etal-2023-model
%X Link prediction models based on factual knowledge graphs are commonly used in applications such as search and question answering. However, work investigating social bias in these models has been limited. Previous work focused on knowledge graph embeddings, so more recent classes of models achieving superior results by fine-tuning Transformers have not yet been investigated. We therefore present a model-agnostic approach for bias measurement leveraging fairness metrics to compare bias in knowledge graph embedding-based predictions (KG only) with models that use pre-trained, Transformer-based language models (KG+LM). We further create a dataset to measure gender bias in occupation predictions and assess whether the KG+LM models are more or less biased than KG only models. We find that gender bias tends to be higher for the KG+LM models and analyze potential connections to the accuracy of the models and the data bias inherent in our dataset. Finally, we discuss the limitations and ethical considerations of our work. The repository containing the source code and the data set is publicly available at https://github.com/lena-schwert/comparing-bias-in-KG-models.
%R 10.18653/v1/2023.findings-eacl.121
%U https://aclanthology.org/2023.findings-eacl.121
%U https://doi.org/10.18653/v1/2023.findings-eacl.121
%P 1632-1648
Markdown (Informal)
[Model-Agnostic Bias Measurement in Link Prediction](https://aclanthology.org/2023.findings-eacl.121) (Schwertmann et al., Findings 2023)
ACL
Lena Schwertmann, Manoj Prabhakar Kannan Ravi, and Gerard de Melo. 2023. Model-Agnostic Bias Measurement in Link Prediction. In Findings of the Association for Computational Linguistics: EACL 2023, pages 1632–1648, Dubrovnik, Croatia. Association for Computational Linguistics.
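
The abstract refers to leveraging fairness metrics to compare gender bias in occupation predictions across KG-only and KG+LM link prediction models. As a purely illustrative sketch of one such metric (a demographic parity difference), and not code taken from the paper or its repository, the following Python snippet compares how often each gender group is assigned a given occupation by a model; all entity names and numbers are made up for the example.

```python
# Illustrative only: demographic parity difference over hypothetical
# occupation predictions from a link prediction model, grouped by gender.
# None of these names or values come from the paper or its repository.
from collections import Counter

# Hypothetical model outputs: (gender of subject entity, predicted occupation)
predictions = [
    ("female", "nurse"), ("female", "nurse"), ("female", "engineer"),
    ("male", "engineer"), ("male", "engineer"), ("male", "nurse"),
]

def demographic_parity_difference(preds, target_occupation):
    """Absolute gap between groups in the rate at which the model
    predicts `target_occupation` for entities of that group."""
    rates = {}
    for group in {g for g, _ in preds}:
        group_preds = [occ for g, occ in preds if g == group]
        counts = Counter(group_preds)
        rates[group] = counts[target_occupation] / len(group_preds)
    ordered = sorted(rates.values())
    return ordered[-1] - ordered[0], rates

gap, per_group_rates = demographic_parity_difference(predictions, "engineer")
print(per_group_rates)                       # per-group prediction rates
print(f"demographic parity difference: {gap:.2f}")
```

A larger gap indicates that the model assigns the target occupation at different rates to different gender groups; the paper's actual metrics, data, and evaluation protocol are in the linked repository (https://github.com/lena-schwert/comparing-bias-in-KG-models).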