BibTeX
@inproceedings{zhu-etal-2025-automated,
title = "The Automated but Risky Game: Modeling Agent-to-Agent Negotiations and Transactions in Consumer Markets",
author = "Zhu, Shenzhe and
Sun, Jiao and
Nian, Yi and
South, Tobin and
Pentland, Alex and
Pei, Jiaxin",
editor = "Aletras, Nikolaos and
Chalkidis, Ilias and
Barrett, Leslie and
Goanț{\u{a}}, C{\u{a}}t{\u{a}}lina and
Preoțiuc-Pietro, Daniel and
Spanakis, Gerasimos",
booktitle = "Proceedings of the Natural Legal Language Processing Workshop 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.nllp-1.2/",
pages = "16--16",
ISBN = "979-8-89176-338-8",
abstract = "AI agents are increasingly used in consumer-facing applications to assist with tasks such as product search, negotiation, and transaction execution. In this paper, we investigate a future setting where both consumers and merchants authorize AI agents to automate the negotiations and transactions in consumer settings. We aim to address two questions: (1) Do different LLM agents exhibit varying performances when making deals on behalf of their users? (2) What are the potential risks when we use AI agents to fully automate negotiations and deal-making in consumer settings? We designed an experimental framework to evaluate AI agents' capabilities and performance in real-world negotiation and transaction scenarios, and experimented with a range of open-source and closed-source LLMs. Our analysis reveals that deal-making with LLM agents in consumer settings is an inherently imbalanced game: different AI agents have large disparities in obtaining the best deals for their users. Furthermore, we found that LLMs' behavioral anomaly might lead to financial loss when deployed in real-world decision-making scenarios, such as overspending or making unreasonable deals. Our findings highlight that while automation can enhance transactional efficiency, it also poses nontrivial risks to consumer markets. Users should be careful when delegating business decisions to LLM agents."
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhu-etal-2025-automated">
<titleInfo>
<title>The Automated but Risky Game: Modeling Agent-to-Agent Negotiations and Transactions in Consumer Markets</title>
</titleInfo>
<name type="personal">
<namePart type="given">Shenzhe</namePart>
<namePart type="family">Zhu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jiao</namePart>
<namePart type="family">Sun</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yi</namePart>
<namePart type="family">Nian</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tobin</namePart>
<namePart type="family">South</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alex</namePart>
<namePart type="family">Pentland</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jiaxin</namePart>
<namePart type="family">Pei</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Natural Legal Language Processing Workshop 2025</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Nikolaos</namePart>
        <namePart type="family">Aletras</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Ilias</namePart>
        <namePart type="family">Chalkidis</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Leslie</namePart>
        <namePart type="family">Barrett</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Cătălina</namePart>
        <namePart type="family">Goanță</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Daniel</namePart>
        <namePart type="family">Preoțiuc-Pietro</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Gerasimos</namePart>
        <namePart type="family">Spanakis</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Suzhou, China</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
      <identifier type="isbn">979-8-89176-338-8</identifier>
    </relatedItem>
    <abstract>AI agents are increasingly used in consumer-facing applications to assist with tasks such as product search, negotiation, and transaction execution. In this paper, we investigate a future setting where both consumers and merchants authorize AI agents to automate the negotiations and transactions in consumer settings. We aim to address two questions: (1) Do different LLM agents exhibit varying performances when making deals on behalf of their users? (2) What are the potential risks when we use AI agents to fully automate negotiations and deal-making in consumer settings? We designed an experimental framework to evaluate AI agents’ capabilities and performance in real-world negotiation and transaction scenarios, and experimented with a range of open-source and closed-source LLMs. Our analysis reveals that deal-making with LLM agents in consumer settings is an inherently imbalanced game: different AI agents have large disparities in obtaining the best deals for their users. Furthermore, we found that LLMs’ behavioral anomaly might lead to financial loss when deployed in real-world decision-making scenarios, such as overspending or making unreasonable deals. Our findings highlight that while automation can enhance transactional efficiency, it also poses nontrivial risks to consumer markets. Users should be careful when delegating business decisions to LLM agents.</abstract>
    <identifier type="citekey">zhu-etal-2025-automated</identifier>
    <location>
      <url>https://aclanthology.org/2025.nllp-1.2/</url>
    </location>
    <part>
      <date>2025-11</date>
      <extent unit="page">
        <start>16</start>
        <end>16</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T The Automated but Risky Game: Modeling Agent-to-Agent Negotiations and Transactions in Consumer Markets
%A Zhu, Shenzhe
%A Sun, Jiao
%A Nian, Yi
%A South, Tobin
%A Pentland, Alex
%A Pei, Jiaxin
%Y Aletras, Nikolaos
%Y Chalkidis, Ilias
%Y Barrett, Leslie
%Y Goanță, Cătălina
%Y Preoțiuc-Pietro, Daniel
%Y Spanakis, Gerasimos
%S Proceedings of the Natural Legal Language Processing Workshop 2025
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-338-8
%F zhu-etal-2025-automated
%X AI agents are increasingly used in consumer-facing applications to assist with tasks such as product search, negotiation, and transaction execution. In this paper, we investigate a future setting where both consumers and merchants authorize AI agents to automate the negotiations and transactions in consumer settings. We aim to address two questions: (1) Do different LLM agents exhibit varying performances when making deals on behalf of their users? (2) What are the potential risks when we use AI agents to fully automate negotiations and deal-making in consumer settings? We designed an experimental framework to evaluate AI agents’ capabilities and performance in real-world negotiation and transaction scenarios, and experimented with a range of open-source and closed-source LLMs. Our analysis reveals that deal-making with LLM agents in consumer settings is an inherently imbalanced game: different AI agents have large disparities in obtaining the best deals for their users. Furthermore, we found that LLMs’ behavioral anomaly might lead to financial loss when deployed in real-world decision-making scenarios, such as overspending or making unreasonable deals. Our findings highlight that while automation can enhance transactional efficiency, it also poses nontrivial risks to consumer markets. Users should be careful when delegating business decisions to LLM agents.
%U https://aclanthology.org/2025.nllp-1.2/
%P 16-16
Markdown (Informal)
[The Automated but Risky Game: Modeling Agent-to-Agent Negotiations and Transactions in Consumer Markets](https://aclanthology.org/2025.nllp-1.2/) (Zhu et al., NLLP 2025)
ACL
Shenzhe Zhu, Jiao Sun, Yi Nian, Tobin South, Alex Pentland, and Jiaxin Pei. 2025. The Automated but Risky Game: Modeling Agent-to-Agent Negotiations and Transactions in Consumer Markets. In Proceedings of the Natural Legal Language Processing Workshop 2025, pages 16–16, Suzhou, China. Association for Computational Linguistics.