@inproceedings{alshehri-etal-2026-parameter,
title = "Parameter-Efficient Adaptation of Self-Supervised Models for {A}rabic Speech Recognition",
author = "Alshehri, Wafa Mohammed and
Al-khatib, Wasfi G. and
Amro, Mohammad Ismail",
booktitle = "Proceedings of the 2nd Workshop on {NLP} for Languages Using {A}rabic Script",
month = mar,
year = "2026",
address = "Rabat, Morocco",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2026.abjadnlp-1.40/",
pages = "322--328",
abstract = "Arabic speech recognition systems face distinct challenges due to the language{'}s complex morphology and dialectal variations. Self-supervised models (SSL) like XLS-R have shown promising results, but their size with over than 300 million of parameters, makes fine-tuning computationally expensive. In this work, we present the first comparative study of parameter-efficient fine-tuning (PEFT), specifically LoRA and DoRA, applied to XLS-R for Arabic ASR. We evaluate on the newly released Common Voice Arabic V24.0 dataset, establishing new benchmarks. Our full fine-tuning achieves state-of-the-art results among XLS-R-based models with 23.03{\%} Word Error Rate (WER). In our experiments, LoRA achieved a 36.10{\%} word error rate (WER) while training just 2{\%} of the model{'}s parameters. DoRA reached 45.20{\%} WER in initial experiments. We analyze the trade-offs between accuracy and efficiency, offering practical guidance for developing Arabic ASR systems when computational resources are limited. The models and code are publicly available."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="alshehri-etal-2026-parameter">
<titleInfo>
<title>Parameter-Efficient Adaptation of Self-Supervised Models for Arabic Speech Recognition</title>
</titleInfo>
<name type="personal">
<namePart type="given">Wafa</namePart>
<namePart type="given">Mohammed</namePart>
<namePart type="family">Alshehri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wasfi</namePart>
<namePart type="given">G</namePart>
<namePart type="family">Al-khatib</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohammad</namePart>
<namePart type="given">Ismail</namePart>
<namePart type="family">Amro</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2026-03</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2nd Workshop on NLP for Languages Using Arabic Script</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Rabat, Morocco</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Arabic speech recognition systems face distinct challenges due to the language’s complex morphology and dialectal variations. Self-supervised models (SSL) like XLS-R have shown promising results, but their size with over than 300 million of parameters, makes fine-tuning computationally expensive. In this work, we present the first comparative study of parameter-efficient fine-tuning (PEFT), specifically LoRA and DoRA, applied to XLS-R for Arabic ASR. We evaluate on the newly released Common Voice Arabic V24.0 dataset, establishing new benchmarks. Our full fine-tuning achieves state-of-the-art results among XLS-R-based models with 23.03% Word Error Rate (WER). In our experiments, LoRA achieved a 36.10% word error rate (WER) while training just 2% of the model’s parameters. DoRA reached 45.20% WER in initial experiments. We analyze the trade-offs between accuracy and efficiency, offering practical guidance for developing Arabic ASR systems when computational resources are limited. The models and code are publicly available.</abstract>
<identifier type="citekey">alshehri-etal-2026-parameter</identifier>
<location>
<url>https://aclanthology.org/2026.abjadnlp-1.40/</url>
</location>
<part>
<date>2026-03</date>
<extent unit="page">
<start>322</start>
<end>328</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Parameter-Efficient Adaptation of Self-Supervised Models for Arabic Speech Recognition
%A Alshehri, Wafa Mohammed
%A Al-khatib, Wasfi G.
%A Amro, Mohammad Ismail
%S Proceedings of the 2nd Workshop on NLP for Languages Using Arabic Script
%D 2026
%8 March
%I Association for Computational Linguistics
%C Rabat, Morocco
%F alshehri-etal-2026-parameter
%X Arabic speech recognition systems face distinct challenges due to the language’s complex morphology and dialectal variations. Self-supervised learning (SSL) models like XLS-R have shown promising results, but their size, with over 300 million parameters, makes fine-tuning computationally expensive. In this work, we present the first comparative study of parameter-efficient fine-tuning (PEFT), specifically LoRA and DoRA, applied to XLS-R for Arabic ASR. We evaluate on the newly released Common Voice Arabic V24.0 dataset, establishing new benchmarks. Our full fine-tuning achieves state-of-the-art results among XLS-R-based models with a 23.03% Word Error Rate (WER). In our experiments, LoRA achieved a 36.10% WER while training just 2% of the model’s parameters. DoRA reached 45.20% WER in initial experiments. We analyze the trade-offs between accuracy and efficiency, offering practical guidance for developing Arabic ASR systems when computational resources are limited. The models and code are publicly available.
%U https://aclanthology.org/2026.abjadnlp-1.40/
%P 322-328
Markdown (Informal)
[Parameter-Efficient Adaptation of Self-Supervised Models for Arabic Speech Recognition](https://aclanthology.org/2026.abjadnlp-1.40/) (Alshehri et al., AbjadNLP 2026)
ACL
Wafa Mohammed Alshehri, Wasfi G. Al-khatib, and Mohammad Ismail Amro. 2026. Parameter-Efficient Adaptation of Self-Supervised Models for Arabic Speech Recognition. In Proceedings of the 2nd Workshop on NLP for Languages Using Arabic Script, pages 322–328, Rabat, Morocco. Association for Computational Linguistics.
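
As a companion to the record above: the abstract describes adapting XLS-R (roughly 300 million parameters) with LoRA so that only about 2% of the weights are trained. A minimal sketch of such a setup with the Hugging Face transformers and peft libraries follows. The checkpoint name facebook/wav2vec2-xls-r-300m is the public XLS-R release; the rank, alpha, dropout, and target modules are illustrative assumptions, not the configuration reported in the paper.

# Hypothetical LoRA setup for XLS-R; hyperparameters are assumed,
# not taken from the paper.
from transformers import Wav2Vec2ForCTC
from peft import LoraConfig, get_peft_model

# Public ~300M-parameter XLS-R checkpoint. The CTC head is freshly
# initialized and would be sized to an Arabic character vocabulary
# in a real training run.
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-xls-r-300m")

# Inject low-rank adapters into the attention projections.
# r, lora_alpha, lora_dropout, and target_modules are illustrative;
# adding use_dora=True to this config would switch LoRA to DoRA.
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"],
    modules_to_save=["lm_head"],  # also train the new CTC head
)
model = get_peft_model(model, lora_config)

# Prints the trainable-parameter fraction, on the order of a few
# percent, consistent with the ~2% figure in the abstract.
model.print_trainable_parameters()

Training then proceeds with an ordinary CTC fine-tuning loop (e.g. the transformers Trainer); only the adapter weights and the CTC head receive gradient updates, which is what keeps the memory and compute cost low relative to full fine-tuning.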