@inproceedings{yuan-etal-2025-personalized,
title = "Personalized Large Language Model Assistant with Evolving Conditional Memory",
author = "Yuan, Ruifeng and
Sun, Shichao and
Li, Yongqi and
Wang, Zili and
Cao, Ziqiang and
Li, Wenjie",
editor = "Rambow, Owen and
Wanner, Leo and
Apidianaki, Marianna and
Al-Khalifa, Hend and
Eugenio, Barbara Di and
Schockaert, Steven",
booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.coling-main.254/",
pages = "3764--3777",
abstract = "With the rapid development of large language models, AI assistants like ChatGPT have become increasingly integrated into people`s works and lives but are limited in personalized services. In this paper, we present a plug-and-play framework that could facilitate personalized large language model assistants with evolving conditional memory. The personalized assistant focuses on intelligently preserving the knowledge and experience from the history dialogue with the user, which can be applied to future tailored responses that better align with the user`s preferences. Generally, the assistant generates a set of records from the dialogue, stores them in a memory bank, and retrieves related memory to improve the quality of the response. For the crucial memory design, we explore different ways of constructing the memory and propose a new memorizing mechanism named conditional memory to enhance the memory management of the framework. We also investigate the retrieval and usage of memory in the generation process. To better evaluate the personalized assistants' abilities, we build the first evaluation benchmark from three critical aspects: continuing previous dialogue, learning personalized knowledge and learning from user feedback. The experimental results illustrate the effectiveness of our method."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="yuan-etal-2025-personalized">
<titleInfo>
<title>Personalized Large Language Model Assistant with Evolving Conditional Memory</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ruifeng</namePart>
<namePart type="family">Yuan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shichao</namePart>
<namePart type="family">Sun</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yongqi</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zili</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ziqiang</namePart>
<namePart type="family">Cao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wenjie</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-01</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 31st International Conference on Computational Linguistics</title>
</titleInfo>
<name type="personal">
<namePart type="given">Owen</namePart>
<namePart type="family">Rambow</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Leo</namePart>
<namePart type="family">Wanner</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marianna</namePart>
<namePart type="family">Apidianaki</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hend</namePart>
<namePart type="family">Al-Khalifa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Barbara</namePart>
<namePart type="given">Di</namePart>
<namePart type="family">Eugenio</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Steven</namePart>
<namePart type="family">Schockaert</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, UAE</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>With the rapid development of large language models, AI assistants like ChatGPT have become increasingly integrated into people’s works and lives but are limited in personalized services. In this paper, we present a plug-and-play framework that could facilitate personalized large language model assistants with evolving conditional memory. The personalized assistant focuses on intelligently preserving the knowledge and experience from the history dialogue with the user, which can be applied to future tailored responses that better align with the user’s preferences. Generally, the assistant generates a set of records from the dialogue, stores them in a memory bank, and retrieves related memory to improve the quality of the response. For the crucial memory design, we explore different ways of constructing the memory and propose a new memorizing mechanism named conditional memory to enhance the memory management of the framework. We also investigate the retrieval and usage of memory in the generation process. To better evaluate the personalized assistants’ abilities, we build the first evaluation benchmark from three critical aspects: continuing previous dialogue, learning personalized knowledge and learning from user feedback. The experimental results illustrate the effectiveness of our method.</abstract>
<identifier type="citekey">yuan-etal-2025-personalized</identifier>
<location>
<url>https://aclanthology.org/2025.coling-main.254/</url>
</location>
<part>
<date>2025-01</date>
<extent unit="page">
<start>3764</start>
<end>3777</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Personalized Large Language Model Assistant with Evolving Conditional Memory
%A Yuan, Ruifeng
%A Sun, Shichao
%A Li, Yongqi
%A Wang, Zili
%A Cao, Ziqiang
%A Li, Wenjie
%Y Rambow, Owen
%Y Wanner, Leo
%Y Apidianaki, Marianna
%Y Al-Khalifa, Hend
%Y Eugenio, Barbara Di
%Y Schockaert, Steven
%S Proceedings of the 31st International Conference on Computational Linguistics
%D 2025
%8 January
%I Association for Computational Linguistics
%C Abu Dhabi, UAE
%F yuan-etal-2025-personalized
%X With the rapid development of large language models, AI assistants like ChatGPT have become increasingly integrated into people’s works and lives but are limited in personalized services. In this paper, we present a plug-and-play framework that could facilitate personalized large language model assistants with evolving conditional memory. The personalized assistant focuses on intelligently preserving the knowledge and experience from the history dialogue with the user, which can be applied to future tailored responses that better align with the user’s preferences. Generally, the assistant generates a set of records from the dialogue, stores them in a memory bank, and retrieves related memory to improve the quality of the response. For the crucial memory design, we explore different ways of constructing the memory and propose a new memorizing mechanism named conditional memory to enhance the memory management of the framework. We also investigate the retrieval and usage of memory in the generation process. To better evaluate the personalized assistants’ abilities, we build the first evaluation benchmark from three critical aspects: continuing previous dialogue, learning personalized knowledge and learning from user feedback. The experimental results illustrate the effectiveness of our method.
%U https://aclanthology.org/2025.coling-main.254/
%P 3764-3777
Markdown (Informal)
[Personalized Large Language Model Assistant with Evolving Conditional Memory](https://aclanthology.org/2025.coling-main.254/) (Yuan et al., COLING 2025)
ACL
Ruifeng Yuan, Shichao Sun, Yongqi Li, Zili Wang, Ziqiang Cao, and Wenjie Li. 2025. Personalized Large Language Model Assistant with Evolving Conditional Memory. In Proceedings of the 31st International Conference on Computational Linguistics, pages 3764–3777, Abu Dhabi, UAE. Association for Computational Linguistics.