@inproceedings{ranaldi-pucci-2025-multilingual,
title = "Multilingual Reasoning via Self-training",
author = "Ranaldi, Leonardo and
Pucci, Giulia",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.naacl-long.577/",
doi = "10.18653/v1/2025.naacl-long.577",
pages = "11566--11582",
ISBN = "979-8-89176-189-6",
abstract = "Although reasoning is innately language-agnostic, the multilingual capacities remains a significant challenge for large language models (LLMs). Their ability to generate structured, step-wise explanations is constantly restricted to dominant languages in pre-training data, making cross-lingual generalisation difficult and hindering broader global adoption. Recent works have introduced eclectic strategies to improve reasoning beyond English; however, these methods remain related to specific language that is not always optimal for reasoning.To improve LLMs' multilingual reasoning abilities, we propose a modular approach that instructs the models to structure reasoning passages in a different problem space and then self-refine their capabilities to deliver step-wise reasoning passages that lead to the solution. Experiments show that our approach stably achieves significant improvements in the multilingual reasoning of various models and task, with improved reasoning consistency across languages."
}

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="ranaldi-pucci-2025-multilingual">
    <titleInfo>
      <title>Multilingual Reasoning via Self-training</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Leonardo</namePart>
      <namePart type="family">Ranaldi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Giulia</namePart>
      <namePart type="family">Pucci</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2025-04</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Luis</namePart>
        <namePart type="family">Chiruzzo</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Alan</namePart>
        <namePart type="family">Ritter</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Lu</namePart>
        <namePart type="family">Wang</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Albuquerque, New Mexico</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
      <identifier type="isbn">979-8-89176-189-6</identifier>
    </relatedItem>
    <abstract>Although reasoning is innately language-agnostic, multilingual capability remains a significant challenge for large language models (LLMs). Their ability to generate structured, step-wise explanations is largely restricted to the dominant languages in their pre-training data, making cross-lingual generalisation difficult and hindering broader global adoption. Recent works have introduced eclectic strategies to improve reasoning beyond English; however, these methods remain tied to a specific language that is not always optimal for reasoning. To improve LLMs’ multilingual reasoning abilities, we propose a modular approach that instructs the models to structure reasoning passages in a different problem space and then self-refine their capabilities to deliver step-wise reasoning passages that lead to the solution. Experiments show that our approach consistently achieves significant improvements in the multilingual reasoning of various models and tasks, with improved reasoning consistency across languages.</abstract>
<identifier type="citekey">ranaldi-pucci-2025-multilingual</identifier>
<identifier type="doi">10.18653/v1/2025.naacl-long.577</identifier>
<location>
<url>https://aclanthology.org/2025.naacl-long.577/</url>
</location>
<part>
<date>2025-04</date>
<extent unit="page">
<start>11566</start>
<end>11582</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Multilingual Reasoning via Self-training
%A Ranaldi, Leonardo
%A Pucci, Giulia
%Y Chiruzzo, Luis
%Y Ritter, Alan
%Y Wang, Lu
%S Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)
%D 2025
%8 April
%I Association for Computational Linguistics
%C Albuquerque, New Mexico
%@ 979-8-89176-189-6
%F ranaldi-pucci-2025-multilingual
%X Although reasoning is innately language-agnostic, multilingual capability remains a significant challenge for large language models (LLMs). Their ability to generate structured, step-wise explanations is largely restricted to the dominant languages in their pre-training data, making cross-lingual generalisation difficult and hindering broader global adoption. Recent works have introduced eclectic strategies to improve reasoning beyond English; however, these methods remain tied to a specific language that is not always optimal for reasoning. To improve LLMs’ multilingual reasoning abilities, we propose a modular approach that instructs the models to structure reasoning passages in a different problem space and then self-refine their capabilities to deliver step-wise reasoning passages that lead to the solution. Experiments show that our approach consistently achieves significant improvements in the multilingual reasoning of various models and tasks, with improved reasoning consistency across languages.
%R 10.18653/v1/2025.naacl-long.577
%U https://aclanthology.org/2025.naacl-long.577/
%U https://doi.org/10.18653/v1/2025.naacl-long.577
%P 11566-11582

Markdown (Informal)
[Multilingual Reasoning via Self-training](https://aclanthology.org/2025.naacl-long.577/) (Ranaldi & Pucci, NAACL 2025)

ACL
Leonardo Ranaldi and Giulia Pucci. 2025. Multilingual Reasoning via Self-training. In Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 11566–11582, Albuquerque, New Mexico. Association for Computational Linguistics.