@inproceedings{pedersen-etal-2023-investigating,
    title = "Investigating anatomical bias in clinical machine learning algorithms",
    author = "Pedersen, Jannik and
      Laursen, Martin and
      Vinholt, Pernille and
      Alnor, Anne and
      Savarimuthu, Thiusius",
    editor = "Vlachos, Andreas and
      Augenstein, Isabelle",
    booktitle = "Findings of the Association for Computational Linguistics: EACL 2023",
    month = may,
    year = "2023",
    address = "Dubrovnik, Croatia",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.findings-eacl.103",
    doi = "10.18653/v1/2023.findings-eacl.103",
    pages = "1398--1410",
    abstract = "Clinical machine learning algorithms have shown promising results and could potentially be implemented in clinical practice to provide diagnosis support and improve patient treatment. Barriers for realisation of the algorithms{'} full potential include bias which is systematic and unfair discrimination against certain individuals in favor of others. The objective of this work is to measure anatomical bias in clinical text algorithms. We define anatomical bias as unfair algorithmic outcomes against patients with medical conditions in specific anatomical locations. We measure the degree of anatomical bias across two machine learning models and two Danish clinical text classification tasks, and find that clinical text algorithms are highly prone to anatomical bias. We argue that datasets for creating clinical text algorithms should be curated carefully to isolate the effect of anatomical location in order to avoid bias against patient subgroups.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="pedersen-etal-2023-investigating">
    <titleInfo>
      <title>Investigating anatomical bias in clinical machine learning algorithms</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Jannik</namePart>
      <namePart type="family">Pedersen</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Martin</namePart>
      <namePart type="family">Laursen</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Pernille</namePart>
      <namePart type="family">Vinholt</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Anne</namePart>
      <namePart type="family">Alnor</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Thiusius</namePart>
      <namePart type="family">Savarimuthu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2023-05</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: EACL 2023</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Andreas</namePart>
        <namePart type="family">Vlachos</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Isabelle</namePart>
        <namePart type="family">Augenstein</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Dubrovnik, Croatia</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Clinical machine learning algorithms have shown promising results and could potentially be implemented in clinical practice to provide diagnosis support and improve patient treatment. Barriers for realisation of the algorithms’ full potential include bias which is systematic and unfair discrimination against certain individuals in favor of others. The objective of this work is to measure anatomical bias in clinical text algorithms. We define anatomical bias as unfair algorithmic outcomes against patients with medical conditions in specific anatomical locations. We measure the degree of anatomical bias across two machine learning models and two Danish clinical text classification tasks, and find that clinical text algorithms are highly prone to anatomical bias. We argue that datasets for creating clinical text algorithms should be curated carefully to isolate the effect of anatomical location in order to avoid bias against patient subgroups.</abstract>
    <identifier type="citekey">pedersen-etal-2023-investigating</identifier>
    <identifier type="doi">10.18653/v1/2023.findings-eacl.103</identifier>
    <location>
      <url>https://aclanthology.org/2023.findings-eacl.103</url>
    </location>
    <part>
      <date>2023-05</date>
      <extent unit="page">
        <start>1398</start>
        <end>1410</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Investigating anatomical bias in clinical machine learning algorithms
%A Pedersen, Jannik
%A Laursen, Martin
%A Vinholt, Pernille
%A Alnor, Anne
%A Savarimuthu, Thiusius
%Y Vlachos, Andreas
%Y Augenstein, Isabelle
%S Findings of the Association for Computational Linguistics: EACL 2023
%D 2023
%8 May
%I Association for Computational Linguistics
%C Dubrovnik, Croatia
%F pedersen-etal-2023-investigating
%X Clinical machine learning algorithms have shown promising results and could potentially be implemented in clinical practice to provide diagnosis support and improve patient treatment. Barriers for realisation of the algorithms’ full potential include bias which is systematic and unfair discrimination against certain individuals in favor of others. The objective of this work is to measure anatomical bias in clinical text algorithms. We define anatomical bias as unfair algorithmic outcomes against patients with medical conditions in specific anatomical locations. We measure the degree of anatomical bias across two machine learning models and two Danish clinical text classification tasks, and find that clinical text algorithms are highly prone to anatomical bias. We argue that datasets for creating clinical text algorithms should be curated carefully to isolate the effect of anatomical location in order to avoid bias against patient subgroups.
%R 10.18653/v1/2023.findings-eacl.103
%U https://aclanthology.org/2023.findings-eacl.103
%U https://doi.org/10.18653/v1/2023.findings-eacl.103
%P 1398-1410
Markdown (Informal)
[Investigating anatomical bias in clinical machine learning algorithms](https://aclanthology.org/2023.findings-eacl.103) (Pedersen et al., Findings 2023)
ACL
Jannik Pedersen, Martin Laursen, Pernille Vinholt, Anne Alnor, and Thiusius Savarimuthu. 2023. [Investigating anatomical bias in clinical machine learning algorithms](https://aclanthology.org/2023.findings-eacl.103). In *Findings of the Association for Computational Linguistics: EACL 2023*, pages 1398–1410, Dubrovnik, Croatia. Association for Computational Linguistics.