@inproceedings{babatunde-etal-2025-beyond,
title = "Beyond Monolingual Limits: Fine-Tuning Monolingual {ASR} for {Y}oruba-{E}nglish Code-Switching",
author = "Babatunde, Oreoluwa Boluwatife and
Olufemi, Victor Tolulope and
Bolarinwa, Emmanuel and
Moshood, Kausar Yetunde and
Emezue, Chris Chinenye",
editor = "Winata, Genta Indra and
Kar, Sudipta and
Zhukova, Marina and
Solorio, Thamar and
Ai, Xi and
Hamed, Injy and
Ihsani, Mahardika Krisna Krisna and
Wijaya, Derry Tanti and
Kuwanto, Garry",
booktitle = "Proceedings of the 7th Workshop on Computational Approaches to Linguistic Code-Switching",
month = may,
year = "2025",
address = "Albuquerque, New Mexico, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.calcs-1.3/",
doi = "10.18653/v1/2025.calcs-1.3",
pages = "18--25",
ISBN = "979-8-89176-053-0",
abstract = "Code-switching (CS) presents a significant challenge for Automatic Speech Recognition (ASR) systems, particularly in low-resource settings. While multilingual ASR models like OpenAI Whisper Large v3 are designed to handle multiple languages, their high computational demands make them less practical for real-world deployment in resource-constrained environments. In this study, we investigate the effectiveness of fine-tuning both monolingual and multilingual ASR models for Yoruba-English CS speech. Our results show that unadapted monolingual ASR models outperform Whisper Large v3 in a zero-shot setting on CS speech. Fine-tuning significantly reduces WER for both monolingual and multilingual models, with monolingual models achieving over a 20{\%} WER reduction on CS and Yoruba speech while maintaining lower computational costs. However, we observe a trade-off, as fine-tuning leads to some degradation in English recognition, particularly for multilingual models. Our findings highlight that while multilingual models benefit from fine-tuning, monolingual models provide a computationally efficient and competitive alternative for CS-ASR, making them a viable choice for resource-constrained environments."
}

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="babatunde-etal-2025-beyond">
<titleInfo>
<title>Beyond Monolingual Limits: Fine-Tuning Monolingual ASR for Yoruba-English Code-Switching</title>
</titleInfo>
<name type="personal">
<namePart type="given">Oreoluwa</namePart>
<namePart type="given">Boluwatife</namePart>
<namePart type="family">Babatunde</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Victor</namePart>
<namePart type="given">Tolulope</namePart>
<namePart type="family">Olufemi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Emmanuel</namePart>
<namePart type="family">Bolarinwa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kausar</namePart>
<namePart type="given">Yetunde</namePart>
<namePart type="family">Moshood</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chris</namePart>
<namePart type="given">Chinenye</namePart>
<namePart type="family">Emezue</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 7th Workshop on Computational Approaches to Linguistic Code-Switching</title>
</titleInfo>
<name type="personal">
<namePart type="given">Genta</namePart>
<namePart type="given">Indra</namePart>
<namePart type="family">Winata</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sudipta</namePart>
<namePart type="family">Kar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marina</namePart>
<namePart type="family">Zhukova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Thamar</namePart>
<namePart type="family">Solorio</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xi</namePart>
<namePart type="family">Ai</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Injy</namePart>
<namePart type="family">Hamed</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mahardika</namePart>
<namePart type="given">Krisna</namePart>
<namePart type="given">Krisna</namePart>
<namePart type="family">Ihsani</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Derry</namePart>
<namePart type="given">Tanti</namePart>
<namePart type="family">Wijaya</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Garry</namePart>
<namePart type="family">Kuwanto</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Albuquerque, New Mexico, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-053-0</identifier>
</relatedItem>
<abstract>Code-switching (CS) presents a significant challenge for Automatic Speech Recognition (ASR) systems, particularly in low-resource settings. While multilingual ASR models like OpenAI Whisper Large v3 are designed to handle multiple languages, their high computational demands make them less practical for real-world deployment in resource-constrained environments. In this study, we investigate the effectiveness of fine-tuning both monolingual and multilingual ASR models for Yoruba-English CS speech. Our results show that unadapted monolingual ASR models outperform Whisper Large v3 in a zero-shot setting on CS speech. Fine-tuning significantly reduces WER for both monolingual and multilingual models, with monolingual models achieving over a 20% WER reduction on CS and Yoruba speech while maintaining lower computational costs. However, we observe a trade-off, as fine-tuning leads to some degradation in English recognition, particularly for multilingual models. Our findings highlight that while multilingual models benefit from fine-tuning, monolingual models provide a computationally efficient and competitive alternative for CS-ASR, making them a viable choice for resource-constrained environments.</abstract>
<identifier type="citekey">babatunde-etal-2025-beyond</identifier>
<identifier type="doi">10.18653/v1/2025.calcs-1.3</identifier>
<location>
<url>https://aclanthology.org/2025.calcs-1.3/</url>
</location>
<part>
<date>2025-05</date>
<extent unit="page">
<start>18</start>
<end>25</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Beyond Monolingual Limits: Fine-Tuning Monolingual ASR for Yoruba-English Code-Switching
%A Babatunde, Oreoluwa Boluwatife
%A Olufemi, Victor Tolulope
%A Bolarinwa, Emmanuel
%A Moshood, Kausar Yetunde
%A Emezue, Chris Chinenye
%Y Winata, Genta Indra
%Y Kar, Sudipta
%Y Zhukova, Marina
%Y Solorio, Thamar
%Y Ai, Xi
%Y Hamed, Injy
%Y Ihsani, Mahardika Krisna Krisna
%Y Wijaya, Derry Tanti
%Y Kuwanto, Garry
%S Proceedings of the 7th Workshop on Computational Approaches to Linguistic Code-Switching
%D 2025
%8 May
%I Association for Computational Linguistics
%C Albuquerque, New Mexico, USA
%@ 979-8-89176-053-0
%F babatunde-etal-2025-beyond
%X Code-switching (CS) presents a significant challenge for Automatic Speech Recognition (ASR) systems, particularly in low-resource settings. While multilingual ASR models like OpenAI Whisper Large v3 are designed to handle multiple languages, their high computational demands make them less practical for real-world deployment in resource-constrained environments. In this study, we investigate the effectiveness of fine-tuning both monolingual and multilingual ASR models for Yoruba-English CS speech. Our results show that unadapted monolingual ASR models outperform Whisper Large v3 in a zero-shot setting on CS speech. Fine-tuning significantly reduces WER for both monolingual and multilingual models, with monolingual models achieving over a 20% WER reduction on CS and Yoruba speech while maintaining lower computational costs. However, we observe a trade-off, as fine-tuning leads to some degradation in English recognition, particularly for multilingual models. Our findings highlight that while multilingual models benefit from fine-tuning, monolingual models provide a computationally efficient and competitive alternative for CS-ASR, making them a viable choice for resource-constrained environments.
%R 10.18653/v1/2025.calcs-1.3
%U https://aclanthology.org/2025.calcs-1.3/
%U https://doi.org/10.18653/v1/2025.calcs-1.3
%P 18-25
Markdown (Informal)
[Beyond Monolingual Limits: Fine-Tuning Monolingual ASR for Yoruba-English Code-Switching](https://aclanthology.org/2025.calcs-1.3/) (Babatunde et al., CALCS 2025)
ACL
Oreoluwa Boluwatife Babatunde, Victor Tolulope Olufemi, Emmanuel Bolarinwa, Kausar Yetunde Moshood, and Chris Chinenye Emezue. 2025. Beyond Monolingual Limits: Fine-Tuning Monolingual ASR for Yoruba-English Code-Switching. In Proceedings of the 7th Workshop on Computational Approaches to Linguistic Code-Switching, pages 18–25, Albuquerque, New Mexico, USA. Association for Computational Linguistics.