@inproceedings{ghanim-etal-2024-jailbreaking,
title = "Jailbreaking {LLM}s with {A}rabic Transliteration and {A}rabizi",
author = "Ghanim, Mansour Al and
Almohaimeed, Saleh and
Zheng, Mengxin and
Solihin, Yan and
Lou, Qian",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.emnlp-main.1034",
doi = "10.18653/v1/2024.emnlp-main.1034",
pages = "18584--18600",
abstract = "This study identifies the potential vulnerabilities of Large Language Models (LLMs) to {`}jailbreak{'} attacks, specifically focusing on the Arabic language and its various forms. While most research has concentrated on English-based prompt manipulation, our investigation broadens the scope to investigate the Arabic language. We initially tested the AdvBench benchmark in Standardized Arabic, finding that even with prompt manipulation techniques like prefix injection, it was insufficient to provoke LLMs into generating unsafe content. However, when using Arabic transliteration and chatspeak (or arabizi), we found that unsafe content could be produced on platforms like OpenAI GPT-4 and Anthropic Claude 3 Sonnet. Our findings suggest that using Arabic and its various forms could expose information that might remain hidden, potentially increasing the risk of jailbreak attacks. We hypothesize that this exposure could be due to the model{'}s learned connection to specific words, highlighting the need for more comprehensive safety training across all language forms.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="ghanim-etal-2024-jailbreaking">
    <titleInfo>
      <title>Jailbreaking LLMs with Arabic Transliteration and Arabizi</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Mansour</namePart>
      <namePart type="given">Al</namePart>
      <namePart type="family">Ghanim</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Saleh</namePart>
      <namePart type="family">Almohaimeed</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Mengxin</namePart>
      <namePart type="family">Zheng</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yan</namePart>
      <namePart type="family">Solihin</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Qian</namePart>
      <namePart type="family">Lou</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Yaser</namePart>
        <namePart type="family">Al-Onaizan</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Mohit</namePart>
        <namePart type="family">Bansal</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yun-Nung</namePart>
        <namePart type="family">Chen</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Miami, Florida, USA</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>This study identifies the potential vulnerabilities of Large Language Models (LLMs) to ‘jailbreak’ attacks, specifically focusing on the Arabic language and its various forms. While most research has concentrated on English-based prompt manipulation, our investigation broadens the scope to investigate the Arabic language. We initially tested the AdvBench benchmark in Standardized Arabic, finding that even with prompt manipulation techniques like prefix injection, it was insufficient to provoke LLMs into generating unsafe content. However, when using Arabic transliteration and chatspeak (or arabizi), we found that unsafe content could be produced on platforms like OpenAI GPT-4 and Anthropic Claude 3 Sonnet. Our findings suggest that using Arabic and its various forms could expose information that might remain hidden, potentially increasing the risk of jailbreak attacks. We hypothesize that this exposure could be due to the model’s learned connection to specific words, highlighting the need for more comprehensive safety training across all language forms.</abstract>
    <identifier type="citekey">ghanim-etal-2024-jailbreaking</identifier>
    <identifier type="doi">10.18653/v1/2024.emnlp-main.1034</identifier>
    <location>
      <url>https://aclanthology.org/2024.emnlp-main.1034</url>
    </location>
    <part>
      <date>2024-11</date>
      <extent unit="page">
        <start>18584</start>
        <end>18600</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Jailbreaking LLMs with Arabic Transliteration and Arabizi
%A Ghanim, Mansour Al
%A Almohaimeed, Saleh
%A Zheng, Mengxin
%A Solihin, Yan
%A Lou, Qian
%Y Al-Onaizan, Yaser
%Y Bansal, Mohit
%Y Chen, Yun-Nung
%S Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, Florida, USA
%F ghanim-etal-2024-jailbreaking
%X This study identifies the potential vulnerabilities of Large Language Models (LLMs) to ‘jailbreak’ attacks, specifically focusing on the Arabic language and its various forms. While most research has concentrated on English-based prompt manipulation, our investigation broadens the scope to investigate the Arabic language. We initially tested the AdvBench benchmark in Standardized Arabic, finding that even with prompt manipulation techniques like prefix injection, it was insufficient to provoke LLMs into generating unsafe content. However, when using Arabic transliteration and chatspeak (or arabizi), we found that unsafe content could be produced on platforms like OpenAI GPT-4 and Anthropic Claude 3 Sonnet. Our findings suggest that using Arabic and its various forms could expose information that might remain hidden, potentially increasing the risk of jailbreak attacks. We hypothesize that this exposure could be due to the model’s learned connection to specific words, highlighting the need for more comprehensive safety training across all language forms.
%R 10.18653/v1/2024.emnlp-main.1034
%U https://aclanthology.org/2024.emnlp-main.1034
%U https://doi.org/10.18653/v1/2024.emnlp-main.1034
%P 18584-18600
Markdown (Informal)
[Jailbreaking LLMs with Arabic Transliteration and Arabizi](https://aclanthology.org/2024.emnlp-main.1034) (Ghanim et al., EMNLP 2024)
ACL
Mansour Al Ghanim, Saleh Almohaimeed, Mengxin Zheng, Yan Solihin, and Qian Lou. 2024. Jailbreaking LLMs with Arabic Transliteration and Arabizi. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 18584–18600, Miami, Florida, USA. Association for Computational Linguistics.