BibTeX
@inproceedings{alghaslan-almutairy-2024-mgkm,
title = "{MGKM} at {S}tance{E}val2024 Fine-Tuning Large Language Models for {A}rabic Stance Detection",
author = "Alghaslan, Mamoun and
Almutairy, Khaled",
editor = "Habash, Nizar and
Bouamor, Houda and
Eskander, Ramy and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Abdelali, Ahmed and
Touileb, Samia and
Hamed, Injy and
Onaizan, Yaser and
Alhafni, Bashar and
Antoun, Wissam and
Khalifa, Salam and
Haddad, Hatem and
Zitouni, Imed and
AlKhamissi, Badr and
Almatham, Rawan and
Mrini, Khalil",
booktitle = "Proceedings of The Second Arabic Natural Language Processing Conference",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.arabicnlp-1.95",
doi = "10.18653/v1/2024.arabicnlp-1.95",
pages = "816--822",
abstract = "Social media platforms have become essential in daily life, enabling users to express their opinions and stances on various topics. Stance detection, which identifies the viewpoint expressed in text toward a target, has predominantly focused on English. MAWQIF is the pioneering Arabic dataset for target-specific stance detection, consisting of 4,121 tweets annotated with stance, sentiment, and sarcasm. The original dataset, benchmarked on four BERT-based models, achieved a best macro-F1 score of 78.89, indicating significant room for improvement. This study evaluates the effectiveness of three Large Language Models (LLMs) in detecting target-specific stances in MAWQIF. The LLMs assessed are ChatGPT-3.5-turbo, Meta-Llama-3-8B-Instruct, and Falcon-7B-Instruct. Performance was measured using both zero-shot and full fine-tuning approaches. Our findings demonstrate that fine-tuning substantially enhances the stance detection capabilities of LLMs in Arabic tweets. Notably, GPT-3.5-Turbo achieved the highest performance with a macro-F1 score of 82.93, underscoring the potential of fine-tuned LLMs for language-specific applications.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="alghaslan-almutairy-2024-mgkm">
<titleInfo>
<title>MGKM at StanceEval2024 Fine-Tuning Large Language Models for Arabic Stance Detection</title>
</titleInfo>
<name type="personal">
<namePart type="given">Mamoun</namePart>
<namePart type="family">Alghaslan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Khaled</namePart>
<namePart type="family">Almutairy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of The Second Arabic Natural Language Processing Conference</title>
</titleInfo>
<name type="personal">
<namePart type="given">Nizar</namePart>
<namePart type="family">Habash</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Houda</namePart>
<namePart type="family">Bouamor</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ramy</namePart>
<namePart type="family">Eskander</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nadi</namePart>
<namePart type="family">Tomeh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ibrahim</namePart>
<namePart type="family">Abu Farha</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ahmed</namePart>
<namePart type="family">Abdelali</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Samia</namePart>
<namePart type="family">Touileb</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Injy</namePart>
<namePart type="family">Hamed</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yaser</namePart>
<namePart type="family">Onaizan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bashar</namePart>
<namePart type="family">Alhafni</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wissam</namePart>
<namePart type="family">Antoun</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Salam</namePart>
<namePart type="family">Khalifa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hatem</namePart>
<namePart type="family">Haddad</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Imed</namePart>
<namePart type="family">Zitouni</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Badr</namePart>
<namePart type="family">AlKhamissi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rawan</namePart>
<namePart type="family">Almatham</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Khalil</namePart>
<namePart type="family">Mrini</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Bangkok, Thailand</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Social media platforms have become essential in daily life, enabling users to express their opinions and stances on various topics. Stance detection, which identifies the viewpoint expressed in text toward a target, has predominantly focused on English. MAWQIF is the pioneering Arabic dataset for target-specific stance detection, consisting of 4,121 tweets annotated with stance, sentiment, and sarcasm. The original dataset, benchmarked on four BERT-based models, achieved a best macro-F1 score of 78.89, indicating significant room for improvement. This study evaluates the effectiveness of three Large Language Models (LLMs) in detecting target-specific stances in MAWQIF. The LLMs assessed are ChatGPT-3.5-turbo, Meta-Llama-3-8B-Instruct, and Falcon-7B-Instruct. Performance was measured using both zero-shot and full fine-tuning approaches. Our findings demonstrate that fine-tuning substantially enhances the stance detection capabilities of LLMs in Arabic tweets. Notably, GPT-3.5-Turbo achieved the highest performance with a macro-F1 score of 82.93, underscoring the potential of fine-tuned LLMs for language-specific applications.</abstract>
<identifier type="citekey">alghaslan-almutairy-2024-mgkm</identifier>
<identifier type="doi">10.18653/v1/2024.arabicnlp-1.95</identifier>
<location>
<url>https://aclanthology.org/2024.arabicnlp-1.95</url>
</location>
<part>
<date>2024-08</date>
<extent unit="page">
<start>816</start>
<end>822</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T MGKM at StanceEval2024 Fine-Tuning Large Language Models for Arabic Stance Detection
%A Alghaslan, Mamoun
%A Almutairy, Khaled
%Y Habash, Nizar
%Y Bouamor, Houda
%Y Eskander, Ramy
%Y Tomeh, Nadi
%Y Abu Farha, Ibrahim
%Y Abdelali, Ahmed
%Y Touileb, Samia
%Y Hamed, Injy
%Y Onaizan, Yaser
%Y Alhafni, Bashar
%Y Antoun, Wissam
%Y Khalifa, Salam
%Y Haddad, Hatem
%Y Zitouni, Imed
%Y AlKhamissi, Badr
%Y Almatham, Rawan
%Y Mrini, Khalil
%S Proceedings of The Second Arabic Natural Language Processing Conference
%D 2024
%8 August
%I Association for Computational Linguistics
%C Bangkok, Thailand
%F alghaslan-almutairy-2024-mgkm
%X Social media platforms have become essential in daily life, enabling users to express their opinions and stances on various topics. Stance detection, which identifies the viewpoint expressed in text toward a target, has predominantly focused on English. MAWQIF is the pioneering Arabic dataset for target-specific stance detection, consisting of 4,121 tweets annotated with stance, sentiment, and sarcasm. The original dataset, benchmarked on four BERT-based models, achieved a best macro-F1 score of 78.89, indicating significant room for improvement. This study evaluates the effectiveness of three Large Language Models (LLMs) in detecting target-specific stances in MAWQIF. The LLMs assessed are ChatGPT-3.5-turbo, Meta-Llama-3-8B-Instruct, and Falcon-7B-Instruct. Performance was measured using both zero-shot and full fine-tuning approaches. Our findings demonstrate that fine-tuning substantially enhances the stance detection capabilities of LLMs in Arabic tweets. Notably, GPT-3.5-Turbo achieved the highest performance with a macro-F1 score of 82.93, underscoring the potential of fine-tuned LLMs for language-specific applications.
%R 10.18653/v1/2024.arabicnlp-1.95
%U https://aclanthology.org/2024.arabicnlp-1.95
%U https://doi.org/10.18653/v1/2024.arabicnlp-1.95
%P 816-822
Markdown (Informal)
[MGKM at StanceEval2024 Fine-Tuning Large Language Models for Arabic Stance Detection](https://aclanthology.org/2024.arabicnlp-1.95) (Alghaslan & Almutairy, ArabicNLP-WS 2024)
ACL
Mamoun Alghaslan and Khaled Almutairy. 2024. MGKM at StanceEval2024 Fine-Tuning Large Language Models for Arabic Stance Detection. In Proceedings of The Second Arabic Natural Language Processing Conference, pages 816–822, Bangkok, Thailand. Association for Computational Linguistics.