BibTeX
@inproceedings{goworek-dubossarsky-2025-multilinguality,
title = "Multilinguality Does not Make Sense: Investigating Factors Behind Zero-Shot Cross-Lingual Transfer in Sense-Aware Tasks",
author = "Goworek, Roksana and
Dubossarsky, Haim",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.emnlp-main.1773/",
pages = "34992--35017",
ISBN = "979-8-89176-332-6",
abstract = "Cross-lingual transfer allows models to perform tasks in languages unseen during training and is often assumed to benefit from increased multilinguality. In this work, we challenge this assumption in the context of two underexplored, sense-aware tasks: polysemy disambiguation and lexical semantic change. Through a large-scale analysis across 28 languages, we show that multilingual training is neither necessary nor inherently beneficial for effective transfer. Instead, we find that confounding factors, such as fine-tuning data composition and evaluation artifacts, can better account for the perceived advantages of multilinguality. Our findings call for more rigorous evaluations in multilingual NLP, and more nuanced and sensible choice of models for transfer. We release fine-tuned models and benchmarks to support further research, with implications extending to low-resource and typologically diverse languages."
}

MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="goworek-dubossarsky-2025-multilinguality">
    <titleInfo>
      <title>Multilinguality Does not Make Sense: Investigating Factors Behind Zero-Shot Cross-Lingual Transfer in Sense-Aware Tasks</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Roksana</namePart>
      <namePart type="family">Goworek</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Haim</namePart>
      <namePart type="family">Dubossarsky</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2025-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Christos</namePart>
        <namePart type="family">Christodoulopoulos</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Tanmoy</namePart>
        <namePart type="family">Chakraborty</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Carolyn</namePart>
        <namePart type="family">Rose</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Violet</namePart>
        <namePart type="family">Peng</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Suzhou, China</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
      <identifier type="isbn">979-8-89176-332-6</identifier>
    </relatedItem>
    <abstract>Cross-lingual transfer allows models to perform tasks in languages unseen during training and is often assumed to benefit from increased multilinguality. In this work, we challenge this assumption in the context of two underexplored, sense-aware tasks: polysemy disambiguation and lexical semantic change. Through a large-scale analysis across 28 languages, we show that multilingual training is neither necessary nor inherently beneficial for effective transfer. Instead, we find that confounding factors, such as fine-tuning data composition and evaluation artifacts, can better account for the perceived advantages of multilinguality. Our findings call for more rigorous evaluations in multilingual NLP, and a more nuanced and sensible choice of models for transfer. We release fine-tuned models and benchmarks to support further research, with implications extending to low-resource and typologically diverse languages.</abstract>
<identifier type="citekey">goworek-dubossarsky-2025-multilinguality</identifier>
<location>
<url>https://aclanthology.org/2025.emnlp-main.1773/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>34992</start>
<end>35017</end>
</extent>
</part>
</mods>
</modsCollection>

Endnote
%0 Conference Proceedings
%T Multilinguality Does not Make Sense: Investigating Factors Behind Zero-Shot Cross-Lingual Transfer in Sense-Aware Tasks
%A Goworek, Roksana
%A Dubossarsky, Haim
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-332-6
%F goworek-dubossarsky-2025-multilinguality
%X Cross-lingual transfer allows models to perform tasks in languages unseen during training and is often assumed to benefit from increased multilinguality. In this work, we challenge this assumption in the context of two underexplored, sense-aware tasks: polysemy disambiguation and lexical semantic change. Through a large-scale analysis across 28 languages, we show that multilingual training is neither necessary nor inherently beneficial for effective transfer. Instead, we find that confounding factors, such as fine-tuning data composition and evaluation artifacts, can better account for the perceived advantages of multilinguality. Our findings call for more rigorous evaluations in multilingual NLP, and a more nuanced and sensible choice of models for transfer. We release fine-tuned models and benchmarks to support further research, with implications extending to low-resource and typologically diverse languages.
%U https://aclanthology.org/2025.emnlp-main.1773/
%P 34992-35017

Markdown (Informal)
[Multilinguality Does not Make Sense: Investigating Factors Behind Zero-Shot Cross-Lingual Transfer in Sense-Aware Tasks](https://aclanthology.org/2025.emnlp-main.1773/) (Goworek & Dubossarsky, EMNLP 2025)

ACL
Roksana Goworek and Haim Dubossarsky. 2025. Multilinguality Does not Make Sense: Investigating Factors Behind Zero-Shot Cross-Lingual Transfer in Sense-Aware Tasks. In Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing, pages 34992–35017, Suzhou, China. Association for Computational Linguistics.
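
For convenience, a minimal LaTeX sketch of one way the BibTeX record above could be used in a manuscript. The bibliography file name anthology.bib and the choice of natbib with the plainnat style are illustrative assumptions, not part of the Anthology record; plain BibTeX with \cite works just as well.

\documentclass{article}
\usepackage{natbib}  % assumption: natbib for author-year \citep citations

\begin{document}
Recent work questions whether multilinguality helps sense-aware
cross-lingual transfer \citep{goworek-dubossarsky-2025-multilinguality}.

\bibliographystyle{plainnat}
\bibliography{anthology}  % assumes the BibTeX entry above is saved as anthology.bib
\end{document}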