@inproceedings{al-sahili-etal-2025-breaking,
title = "Breaking Language Barriers or Reinforcing Bias? A Study of Gender and Racial Disparities in Multilingual Contrastive Vision Language Models",
author = "Al Sahili, Zahraa and
Patras, Ioannis and
Purver, Matthew",
editor = "Inui, Kentaro and
Sakti, Sakriani and
Wang, Haofen and
Wong, Derek F. and
Bhattacharyya, Pushpak and
Banerjee, Biplab and
Ekbal, Asif and
Chakraborty, Tanmoy and
Singh, Dhirendra Pratap",
booktitle = "Proceedings of the 14th International Joint Conference on Natural Language Processing and the 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics",
month = dec,
year = "2025",
address = "Mumbai, India",
publisher = "The Asian Federation of Natural Language Processing and The Association for Computational Linguistics",
url = "https://aclanthology.org/2025.ijcnlp-long.20/",
pages = "331--352",
ISBN = "979-8-89176-298-5",
abstract = "Multilingual vision{--}language models (VLMs) promise universal image{--}text retrieval, yet their social biases remain under{-}explored. We perform the first systematic audit of four public multilingual CLIP variants{---}M{-}CLIP, NLLB{-}CLIP, CAPIVARA{-}CLIP, and the debiased SigLIP{-}2{---}covering ten languages that differ in resource availability and morphological gender marking. Using balanced subsets of FairFace and the PATA stereotype suite in a zero{-}shot setting, we quantify race and gender bias and measure stereotype amplification. Contrary to the intuition that multilinguality mitigates bias, \textit{every} model exhibits stronger gender skew than its English{-}only baseline. CAPIVARA{-}CLIP shows its largest biases precisely in the low{-}resource languages it targets, while the shared encoder of NLLB{-}CLIP and SigLIP{-}2 transfers English gender stereotypes into gender{-}neutral languages; loosely coupled encoders largely avoid this leakage. Although SigLIP{-}2 reduces agency and communion skews, it inherits{---}and in caption{-}sparse contexts (e.g., Xhosa) amplifies{---}the English anchor{'}s crime associations. Highly gendered languages consistently magnify all bias types, yet gender{-}neutral languages remain vulnerable whenever cross{-}lingual weight sharing imports foreign stereotypes. Aggregated metrics thus mask language{-}specific ``hot spots,'' underscoring the need for fine{-}grained, language{-}aware bias evaluation in future multilingual VLM research."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="al-sahili-etal-2025-breaking">
<titleInfo>
<title>Breaking Language Barriers or Reinforcing Bias? A Study of Gender and Racial Disparities in Multilingual Contrastive Vision Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Zahraa</namePart>
<namePart type="family">Al Sahili</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ioannis</namePart>
<namePart type="family">Patras</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Matthew</namePart>
<namePart type="family">Purver</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 14th International Joint Conference on Natural Language Processing and the 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kentaro</namePart>
<namePart type="family">Inui</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sakriani</namePart>
<namePart type="family">Sakti</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Haofen</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Derek</namePart>
<namePart type="given">F</namePart>
<namePart type="family">Wong</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pushpak</namePart>
<namePart type="family">Bhattacharyya</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Biplab</namePart>
<namePart type="family">Banerjee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Asif</namePart>
<namePart type="family">Ekbal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tanmoy</namePart>
<namePart type="family">Chakraborty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dhirendra</namePart>
<namePart type="given">Pratap</namePart>
<namePart type="family">Singh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>The Asian Federation of Natural Language Processing and The Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Mumbai, India</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-298-5</identifier>
</relatedItem>
<abstract>Multilingual vision–language models (VLMs) promise universal image–text retrieval, yet their social biases remain under-explored. We perform the first systematic audit of four public multilingual CLIP variants—M-CLIP, NLLB-CLIP, CAPIVARA-CLIP, and the debiased SigLIP-2—covering ten languages that differ in resource availability and morphological gender marking. Using balanced subsets of FairFace and the PATA stereotype suite in a zero-shot setting, we quantify race and gender bias and measure stereotype amplification. Contrary to the intuition that multilinguality mitigates bias, every model exhibits stronger gender skew than its English-only baseline. CAPIVARA-CLIP shows its largest biases precisely in the low-resource languages it targets, while the shared encoder of NLLB-CLIP and SigLIP-2 transfers English gender stereotypes into gender-neutral languages; loosely coupled encoders largely avoid this leakage. Although SigLIP-2 reduces agency and communion skews, it inherits—and in caption-sparse contexts (e.g., Xhosa) amplifies—the English anchor’s crime associations. Highly gendered languages consistently magnify all bias types, yet gender-neutral languages remain vulnerable whenever cross-lingual weight sharing imports foreign stereotypes. Aggregated metrics thus mask language-specific “hot spots,” underscoring the need for fine-grained, language-aware bias evaluation in future multilingual VLM research.</abstract>
<identifier type="citekey">al-sahili-etal-2025-breaking</identifier>
<location>
<url>https://aclanthology.org/2025.ijcnlp-long.20/</url>
</location>
<part>
<date>2025-12</date>
<extent unit="page">
<start>331</start>
<end>352</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Breaking Language Barriers or Reinforcing Bias? A Study of Gender and Racial Disparities in Multilingual Contrastive Vision Language Models
%A Al Sahili, Zahraa
%A Patras, Ioannis
%A Purver, Matthew
%Y Inui, Kentaro
%Y Sakti, Sakriani
%Y Wang, Haofen
%Y Wong, Derek F.
%Y Bhattacharyya, Pushpak
%Y Banerjee, Biplab
%Y Ekbal, Asif
%Y Chakraborty, Tanmoy
%Y Singh, Dhirendra Pratap
%S Proceedings of the 14th International Joint Conference on Natural Language Processing and the 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics
%D 2025
%8 December
%I The Asian Federation of Natural Language Processing and The Association for Computational Linguistics
%C Mumbai, India
%@ 979-8-89176-298-5
%F al-sahili-etal-2025-breaking
%X Multilingual vision–language models (VLMs) promise universal image–text retrieval, yet their social biases remain under-explored. We perform the first systematic audit of four public multilingual CLIP variants—M-CLIP, NLLB-CLIP, CAPIVARA-CLIP, and the debiased SigLIP-2—covering ten languages that differ in resource availability and morphological gender marking. Using balanced subsets of FairFace and the PATA stereotype suite in a zero-shot setting, we quantify race and gender bias and measure stereotype amplification. Contrary to the intuition that multilinguality mitigates bias, every model exhibits stronger gender skew than its English-only baseline. CAPIVARA-CLIP shows its largest biases precisely in the low-resource languages it targets, while the shared encoder of NLLB-CLIP and SigLIP-2 transfers English gender stereotypes into gender-neutral languages; loosely coupled encoders largely avoid this leakage. Although SigLIP-2 reduces agency and communion skews, it inherits—and in caption-sparse contexts (e.g., Xhosa) amplifies—the English anchor’s crime associations. Highly gendered languages consistently magnify all bias types, yet gender-neutral languages remain vulnerable whenever cross-lingual weight sharing imports foreign stereotypes. Aggregated metrics thus mask language-specific “hot spots,” underscoring the need for fine-grained, language-aware bias evaluation in future multilingual VLM research.
%U https://aclanthology.org/2025.ijcnlp-long.20/
%P 331-352
Markdown (Informal)
[Breaking Language Barriers or Reinforcing Bias? A Study of Gender and Racial Disparities in Multilingual Contrastive Vision Language Models](https://aclanthology.org/2025.ijcnlp-long.20/) (Al Sahili et al., IJCNLP-AACL 2025)
ACL
Zahraa Al Sahili, Ioannis Patras, and Matthew Purver. 2025. Breaking Language Barriers or Reinforcing Bias? A Study of Gender and Racial Disparities in Multilingual Contrastive Vision Language Models. In Proceedings of the 14th International Joint Conference on Natural Language Processing and the 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics, pages 331–352, Mumbai, India. The Asian Federation of Natural Language Processing and The Association for Computational Linguistics.
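The abstract describes a zero-shot bias audit: images from balanced demographic subsets are matched against stereotype and neutral text prompts, and the resulting association rates are compared across groups and languages. Below is a minimal illustrative sketch of that setup, assuming the Hugging Face transformers CLIP API with openai/clip-vit-base-patch32 as a stand-in for the multilingual variants; the prompts, image paths, and the simple skew score are hypothetical placeholders, not the paper's actual protocol or metrics.

```python
# Minimal zero-shot bias-probe sketch (illustrative only; not the paper's code).
# Assumes: pip install torch transformers pillow
# "openai/clip-vit-base-patch32" stands in for the multilingual CLIP variants
# (M-CLIP, NLLB-CLIP, CAPIVARA-CLIP, SigLIP-2) audited in the paper.
from PIL import Image
import torch
from transformers import CLIPModel, CLIPProcessor

model_name = "openai/clip-vit-base-patch32"
model = CLIPModel.from_pretrained(model_name)
processor = CLIPProcessor.from_pretrained(model_name)

# Hypothetical stereotype vs. neutral prompts (the paper uses the PATA suite).
prompts = ["a photo of a criminal", "a photo of a person"]

def stereotype_rate(image_paths):
    """Fraction of images whose top zero-shot match is the stereotype prompt."""
    images = [Image.open(p).convert("RGB") for p in image_paths]
    inputs = processor(text=prompts, images=images, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(**inputs).logits_per_image   # shape: (n_images, n_prompts)
    choices = logits.argmax(dim=-1)                 # index of best-matching prompt
    return (choices == 0).float().mean().item()

# Balanced image lists per demographic group (e.g., drawn from FairFace);
# these paths are placeholders.
rate_group_a = stereotype_rate(["faces/group_a/0001.jpg", "faces/group_a/0002.jpg"])
rate_group_b = stereotype_rate(["faces/group_b/0001.jpg", "faces/group_b/0002.jpg"])

# One simple skew score: the gap in stereotype-association rates between groups.
print(f"skew = {rate_group_a - rate_group_b:+.3f}")
```

Repeating such a probe with prompts translated into each target language, per model, would give the kind of language-by-language comparison the abstract argues aggregated metrics obscure.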