@inproceedings{nakash-etal-2025-breaking,
title = "Breaking {R}e{A}ct Agents: Foot-in-the-Door Attack Will Get You In",
author = "Nakash, Itay and
Kour, George and
Uziel, Guy and
Anaby Tavor, Ateret",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2025",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-naacl.363/",
doi = "10.18653/v1/2025.findings-naacl.363",
pages = "6484--6509",
ISBN = "979-8-89176-195-7",
abstract = "Following the advancement of large language models (LLMs), the development of LLM-based autonomous agents has become prevalent. As a result, the need to understand the security vulnerabilities of these agents has become a critical task. We examine how ReAct agents can be exploited using a straightforward yet effective method we refer to as the foot-in-the-door attack. Our experiments show that indirect prompt injection attacks, prompted by harmless and unrelated requests (such as basic calculations), can significantly increase the likelihood of the agent performing subsequent malicious actions. Our results show that once a ReAct agent{'}s thought includes a specific tool or action, the likelihood of executing this tool in the subsequent steps increases significantly, as the agent seldom re-evaluates its actions. Consequently, even random, harmless requests can establish a `foot-in-the-door', allowing an attacker to embed malicious instructions into the agent{'}s thought process, making it more susceptible to harmful directives. To mitigate this vulnerability, we propose implementing a simple reflection mechanism that prompts the agent to reassess the safety of its actions during execution, which can help reduce the success of such attacks."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="nakash-etal-2025-breaking">
<titleInfo>
<title>Breaking ReAct Agents: Foot-in-the-Door Attack Will Get You In</title>
</titleInfo>
<name type="personal">
<namePart type="given">Itay</namePart>
<namePart type="family">Nakash</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">George</namePart>
<namePart type="family">Kour</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Guy</namePart>
<namePart type="family">Uziel</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ateret</namePart>
<namePart type="family">Anaby Tavor</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-04</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: NAACL 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Luis</namePart>
<namePart type="family">Chiruzzo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alan</namePart>
<namePart type="family">Ritter</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lu</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Albuquerque, New Mexico</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-195-7</identifier>
</relatedItem>
<abstract>Following the advancement of large language models (LLMs), the development of LLM-based autonomous agents has become prevalent. As a result, the need to understand the security vulnerabilities of these agents has become a critical task. We examine how ReAct agents can be exploited using a straightforward yet effective method we refer to as the foot-in-the-door attack. Our experiments show that indirect prompt injection attacks, prompted by harmless and unrelated requests (such as basic calculations), can significantly increase the likelihood of the agent performing subsequent malicious actions. Our results show that once a ReAct agent’s thought includes a specific tool or action, the likelihood of executing this tool in the subsequent steps increases significantly, as the agent seldom re-evaluates its actions. Consequently, even random, harmless requests can establish a ‘foot-in-the-door’, allowing an attacker to embed malicious instructions into the agent’s thought process, making it more susceptible to harmful directives. To mitigate this vulnerability, we propose implementing a simple reflection mechanism that prompts the agent to reassess the safety of its actions during execution, which can help reduce the success of such attacks.</abstract>
<identifier type="citekey">nakash-etal-2025-breaking</identifier>
<identifier type="doi">10.18653/v1/2025.findings-naacl.363</identifier>
<location>
<url>https://aclanthology.org/2025.findings-naacl.363/</url>
</location>
<part>
<date>2025-04</date>
<extent unit="page">
<start>6484</start>
<end>6509</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Breaking ReAct Agents: Foot-in-the-Door Attack Will Get You In
%A Nakash, Itay
%A Kour, George
%A Uziel, Guy
%A Anaby Tavor, Ateret
%Y Chiruzzo, Luis
%Y Ritter, Alan
%Y Wang, Lu
%S Findings of the Association for Computational Linguistics: NAACL 2025
%D 2025
%8 April
%I Association for Computational Linguistics
%C Albuquerque, New Mexico
%@ 979-8-89176-195-7
%F nakash-etal-2025-breaking
%X Following the advancement of large language models (LLMs), the development of LLM-based autonomous agents has become prevalent. As a result, the need to understand the security vulnerabilities of these agents has become a critical task. We examine how ReAct agents can be exploited using a straightforward yet effective method we refer to as the foot-in-the-door attack. Our experiments show that indirect prompt injection attacks, prompted by harmless and unrelated requests (such as basic calculations), can significantly increase the likelihood of the agent performing subsequent malicious actions. Our results show that once a ReAct agent’s thought includes a specific tool or action, the likelihood of executing this tool in the subsequent steps increases significantly, as the agent seldom re-evaluates its actions. Consequently, even random, harmless requests can establish a ‘foot-in-the-door’, allowing an attacker to embed malicious instructions into the agent’s thought process, making it more susceptible to harmful directives. To mitigate this vulnerability, we propose implementing a simple reflection mechanism that prompts the agent to reassess the safety of its actions during execution, which can help reduce the success of such attacks.
%R 10.18653/v1/2025.findings-naacl.363
%U https://aclanthology.org/2025.findings-naacl.363/
%U https://doi.org/10.18653/v1/2025.findings-naacl.363
%P 6484-6509
Markdown (Informal)
[Breaking ReAct Agents: Foot-in-the-Door Attack Will Get You In](https://aclanthology.org/2025.findings-naacl.363/) (Nakash et al., Findings 2025)
ACL
Itay Nakash, George Kour, Guy Uziel, and Ateret Anaby Tavor. 2025. Breaking ReAct Agents: Foot-in-the-Door Attack Will Get You In. In Findings of the Association for Computational Linguistics: NAACL 2025, pages 6484–6509, Albuquerque, New Mexico. Association for Computational Linguistics.