@inproceedings{zhang-etal-2025-english,
title = "{E}nglish as Defense Proxy: Mitigating Multilingual Jailbreak via Eliciting {E}nglish Safety Knowledge",
author = "Zhang, Zekai and
Guo, Yiduo and
Lin, Jiuheng and
Quan, Shanghaoran and
Zhang, Huishuai and
Zhao, Dongyan",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-emnlp.62/",
pages = "1185--1196",
ISBN = "979-8-89176-335-7",
abstract = "Large language models (LLMs) excel in many tasks, but their safety guarantees vary by language, e.g., responses in English tend to be safer than those in low-resource languages. This inconsistency creates a vulnerability, since an attacker can circumvent safety measures by using a less-supported language as an intermediary, even without fluency in that language. Traditional solutions rely on multilingual safety alignment, which demands vast, per-language datasets and introduces significant trade-offs between usefulness and safety (the so-called ``alignment tax''). To overcome these limitations, we introduce \textit{English as Defense Proxy (E-Proxy)}, a unified approach that leverages English, usually the advantage language of LLMs, as a universal safety anchor. During multilingual training, E-Proxy uses English jailbreak prompts to extract the model{'}s existing safety knowledge, then applies simple language-mapping prompts (e.g., ``Please answer in target language'') to transfer that knowledge across languages. Our analysis shows that formulating prompts in a high-resource language preserves the model{'}s utility, while enforcing responses in the target language significantly enhances safety. We evaluate E-Proxy on extensive benchmarks of both attack resistance and task performance. On the MultiJail benchmark, E-Proxy blocks over 99 {\%} of jailbreak attempts while retaining 95 {\%} of average task performance, all with a simply constructed multilingual alignment data."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhang-etal-2025-english">
<titleInfo>
<title>English as Defense Proxy: Mitigating Multilingual Jailbreak via Eliciting English Safety Knowledge</title>
</titleInfo>
<name type="personal">
<namePart type="given">Zekai</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yiduo</namePart>
<namePart type="family">Guo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jiuheng</namePart>
<namePart type="family">Lin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shanghaoran</namePart>
<namePart type="family">Quan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Huishuai</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dongyan</namePart>
<namePart type="family">Zhao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Christos</namePart>
<namePart type="family">Christodoulopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tanmoy</namePart>
<namePart type="family">Chakraborty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolyn</namePart>
<namePart type="family">Rose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Violet</namePart>
<namePart type="family">Peng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-335-7</identifier>
</relatedItem>
<abstract>Large language models (LLMs) excel in many tasks, but their safety guarantees vary by language, e.g., responses in English tend to be safer than those in low-resource languages. This inconsistency creates a vulnerability, since an attacker can circumvent safety measures by using a less-supported language as an intermediary, even without fluency in that language. Traditional solutions rely on multilingual safety alignment, which demands vast, per-language datasets and introduces significant trade-offs between usefulness and safety (the so-called “alignment tax”). To overcome these limitations, we introduce English as Defense Proxy (E-Proxy), a unified approach that leverages English, usually the advantaged language of LLMs, as a universal safety anchor. During multilingual training, E-Proxy uses English jailbreak prompts to extract the model’s existing safety knowledge, then applies simple language-mapping prompts (e.g., “Please answer in target language”) to transfer that knowledge across languages. Our analysis shows that formulating prompts in a high-resource language preserves the model’s utility, while enforcing responses in the target language significantly enhances safety. We evaluate E-Proxy on extensive benchmarks of both attack resistance and task performance. On the MultiJail benchmark, E-Proxy blocks over 99% of jailbreak attempts while retaining 95% of average task performance, all with simply constructed multilingual alignment data.</abstract>
<identifier type="citekey">zhang-etal-2025-english</identifier>
<location>
<url>https://aclanthology.org/2025.findings-emnlp.62/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>1185</start>
<end>1196</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T English as Defense Proxy: Mitigating Multilingual Jailbreak via Eliciting English Safety Knowledge
%A Zhang, Zekai
%A Guo, Yiduo
%A Lin, Jiuheng
%A Quan, Shanghaoran
%A Zhang, Huishuai
%A Zhao, Dongyan
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Findings of the Association for Computational Linguistics: EMNLP 2025
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-335-7
%F zhang-etal-2025-english
%X Large language models (LLMs) excel in many tasks, but their safety guarantees vary by language, e.g., responses in English tend to be safer than those in low-resource languages. This inconsistency creates a vulnerability, since an attacker can circumvent safety measures by using a less-supported language as an intermediary, even without fluency in that language. Traditional solutions rely on multilingual safety alignment, which demands vast, per-language datasets and introduces significant trade-offs between usefulness and safety (the so-called “alignment tax”). To overcome these limitations, we introduce English as Defense Proxy (E-Proxy), a unified approach that leverages English, usually the advantaged language of LLMs, as a universal safety anchor. During multilingual training, E-Proxy uses English jailbreak prompts to extract the model’s existing safety knowledge, then applies simple language-mapping prompts (e.g., “Please answer in target language”) to transfer that knowledge across languages. Our analysis shows that formulating prompts in a high-resource language preserves the model’s utility, while enforcing responses in the target language significantly enhances safety. We evaluate E-Proxy on extensive benchmarks of both attack resistance and task performance. On the MultiJail benchmark, E-Proxy blocks over 99% of jailbreak attempts while retaining 95% of average task performance, all with simply constructed multilingual alignment data.
%U https://aclanthology.org/2025.findings-emnlp.62/
%P 1185-1196
Markdown (Informal)
[English as Defense Proxy: Mitigating Multilingual Jailbreak via Eliciting English Safety Knowledge](https://aclanthology.org/2025.findings-emnlp.62/) (Zhang et al., Findings 2025)
ACL
Zekai Zhang, Yiduo Guo, Jiuheng Lin, Shanghaoran Quan, Huishuai Zhang, and Dongyan Zhao. 2025. English as Defense Proxy: Mitigating Multilingual Jailbreak via Eliciting English Safety Knowledge. In Findings of the Association for Computational Linguistics: EMNLP 2025, pages 1185–1196, Suzhou, China. Association for Computational Linguistics.
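
As a rough illustration of the data-construction recipe the abstract describes (not code from the paper), the Python sketch below pairs English jailbreak prompts, suffixed with a language-mapping instruction, with refusals rendered in each target language. The helpers `elicit_english_refusal` and `render_in_language` are hypothetical placeholders for the model call and the translation step, which the paper does not specify here.

```python
# Sketch of the E-Proxy idea as summarized in the abstract: English jailbreak
# prompts carry the safety signal, a language-mapping instruction transfers it,
# and the refusal is enforced in the target language. The two callables below
# are hypothetical stand-ins, not APIs from the paper.
from typing import Callable


def build_alignment_pairs(
    english_jailbreak_prompts: list[str],
    target_languages: list[str],
    elicit_english_refusal: Callable[[str], str],
    render_in_language: Callable[[str, str], str],
) -> list[dict]:
    """Construct multilingual safety-alignment examples per the abstract's recipe."""
    pairs = []
    for prompt in english_jailbreak_prompts:
        # Elicit the model's existing (English) safety knowledge once per prompt.
        english_refusal = elicit_english_refusal(prompt)
        for lang in target_languages:
            pairs.append({
                # Prompt stays in English (high-resource) plus a mapping instruction.
                "prompt": f"{prompt}\nPlease answer in {lang}.",
                # Response is enforced in the target language.
                "response": render_in_language(english_refusal, lang),
            })
    return pairs
```

Under these assumptions, the training prompts remain in the model's strongest language while the responses cover each target language, which is the trade-off the abstract credits for preserving utility while improving multilingual safety.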