@inproceedings{ragab-etal-2025-multilingual,
title = "Multilingual Propaganda Detection: Exploring Transformer-Based Models m{BERT}, {XLM}-{R}o{BERT}a, and m{T}5",
author = "Ragab, Mohamed Ibrahim and
Mohamed, Ensaf Hussein and
Medhat, Walaa",
editor = "Jarrar, Mustafa and
Habash, Habash and
El-Haj, Mo",
booktitle = "Proceedings of the first International Workshop on Nakba Narratives as Language Resources",
month = jan,
year = "2025",
address = "Abu Dhabi",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.nakbanlp-1.9/",
pages = "75--82",
abstract = "This research investigates multilingual propaganda detection by employing transformer-based models, specifically mBERT, XLM-RoBERTa, and mT5. The study utilizes a balanced dataset from the BiasFigNews corpus, annotated for propaganda and bias across five languages. The models were finely tuned to generate embeddings for classification tasks. The evaluation revealed mT5 as the most effective model, achieving an accuracy of 99.61{\%} and an F1-score of 0.9961, followed by mBERT and XLM-RoBERTa with accuracies of 92{\%} and 91.41{\%}, respectively. The findings demonstrate the efficacy of transformer-based embeddings in detecting propaganda while also highlighting challenges in subtle class distinctions. Future work aims to enhance cross-lingual adaptability and explore lightweight models for resource-constrained settings."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ragab-etal-2025-multilingual">
<titleInfo>
<title>Multilingual Propaganda Detection: Exploring Transformer-Based Models mBERT, XLM-RoBERTa, and mT5</title>
</titleInfo>
<name type="personal">
<namePart type="given">Mohamed</namePart>
<namePart type="given">Ibrahim</namePart>
<namePart type="family">Ragab</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ensaf</namePart>
<namePart type="given">Hussein</namePart>
<namePart type="family">Mohamed</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Walaa</namePart>
<namePart type="family">Medhat</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-01</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First International Workshop on Nakba Narratives as Language Resources</title>
</titleInfo>
<name type="personal">
<namePart type="given">Mustafa</namePart>
<namePart type="family">Jarrar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Habash</namePart>
<namePart type="family">Habash</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mo</namePart>
<namePart type="family">El-Haj</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This research investigates multilingual propaganda detection by employing transformer-based models, specifically mBERT, XLM-RoBERTa, and mT5. The study utilizes a balanced dataset from the BiasFigNews corpus, annotated for propaganda and bias across five languages. The models were fine-tuned to generate embeddings for classification tasks. The evaluation revealed mT5 as the most effective model, achieving an accuracy of 99.61% and an F1-score of 0.9961, followed by mBERT and XLM-RoBERTa with accuracies of 92% and 91.41%, respectively. The findings demonstrate the efficacy of transformer-based embeddings in detecting propaganda while also highlighting challenges in subtle class distinctions. Future work aims to enhance cross-lingual adaptability and explore lightweight models for resource-constrained settings.</abstract>
<identifier type="citekey">ragab-etal-2025-multilingual</identifier>
<location>
<url>https://aclanthology.org/2025.nakbanlp-1.9/</url>
</location>
<part>
<date>2025-01</date>
<extent unit="page">
<start>75</start>
<end>82</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Multilingual Propaganda Detection: Exploring Transformer-Based Models mBERT, XLM-RoBERTa, and mT5
%A Ragab, Mohamed Ibrahim
%A Mohamed, Ensaf Hussein
%A Medhat, Walaa
%Y Jarrar, Mustafa
%Y Habash, Habash
%Y El-Haj, Mo
%S Proceedings of the First International Workshop on Nakba Narratives as Language Resources
%D 2025
%8 January
%I Association for Computational Linguistics
%C Abu Dhabi
%F ragab-etal-2025-multilingual
%X This research investigates multilingual propaganda detection by employing transformer-based models, specifically mBERT, XLM-RoBERTa, and mT5. The study utilizes a balanced dataset from the BiasFigNews corpus, annotated for propaganda and bias across five languages. The models were fine-tuned to generate embeddings for classification tasks. The evaluation revealed mT5 as the most effective model, achieving an accuracy of 99.61% and an F1-score of 0.9961, followed by mBERT and XLM-RoBERTa with accuracies of 92% and 91.41%, respectively. The findings demonstrate the efficacy of transformer-based embeddings in detecting propaganda while also highlighting challenges in subtle class distinctions. Future work aims to enhance cross-lingual adaptability and explore lightweight models for resource-constrained settings.
%U https://aclanthology.org/2025.nakbanlp-1.9/
%P 75-82
Markdown (Informal)
[Multilingual Propaganda Detection: Exploring Transformer-Based Models mBERT, XLM-RoBERTa, and mT5](https://aclanthology.org/2025.nakbanlp-1.9/) (Ragab et al., NakbaNLP 2025)
ACL
Mohamed Ibrahim Ragab, Ensaf Hussein Mohamed, and Walaa Medhat. 2025. Multilingual Propaganda Detection: Exploring Transformer-Based Models mBERT, XLM-RoBERTa, and mT5. In Proceedings of the First International Workshop on Nakba Narratives as Language Resources, pages 75–82, Abu Dhabi. Association for Computational Linguistics.