@inproceedings{su-etal-2025-personalized,
title = "Personalized Question Answering with User Profile Generation and Compression",
author = "Su, Hang and
Yang, Yun and
Liu, Tianyang and
Liu, Xin and
Pu, Peng and
Lu, Xuesong",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-emnlp.255/",
pages = "4744--4763",
ISBN = "979-8-89176-335-7",
abstract = "Large language models (LLMs) offer a novel and convenient avenue for humans to acquire knowledge. However, LLMs are prone to providing ``midguy'' answers regardless of users{'} knowledge background, thereby failing to meet each user{'}s personalized needs. To tackle the problem, we propose to generate personalized answers with LLMs based on users{'} past question-answering records. We dynamically generate and update a user{'}s domain and global profiles as the user asks questions, and use the latest profile as the context to generate the answer for a newly-asked question. To save tokens, we propose to compress the domain profile into a set of keywords and use the keywords to prompt LLMs. We theoretically analyze the effectiveness of the compression strategy. Experimental results show that our method can generate more personalized answers than comparative methods. The code and dataset are available at https://github.com/DaSESmartEdu/PQA."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="su-etal-2025-personalized">
    <titleInfo>
      <title>Personalized Question Answering with User Profile Generation and Compression</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Hang</namePart>
      <namePart type="family">Su</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yun</namePart>
      <namePart type="family">Yang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Tianyang</namePart>
      <namePart type="family">Liu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Xin</namePart>
      <namePart type="family">Liu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Peng</namePart>
      <namePart type="family">Pu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Xuesong</namePart>
      <namePart type="family">Lu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2025-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: EMNLP 2025</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Christos</namePart>
        <namePart type="family">Christodoulopoulos</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Tanmoy</namePart>
        <namePart type="family">Chakraborty</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Carolyn</namePart>
        <namePart type="family">Rose</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Violet</namePart>
        <namePart type="family">Peng</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Suzhou, China</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
      <identifier type="isbn">979-8-89176-335-7</identifier>
    </relatedItem>
    <abstract>Large language models (LLMs) offer a novel and convenient avenue for humans to acquire knowledge. However, LLMs are prone to providing “midguy” answers regardless of users’ knowledge background, thereby failing to meet each user’s personalized needs. To tackle the problem, we propose to generate personalized answers with LLMs based on users’ past question-answering records. We dynamically generate and update a user’s domain and global profiles as the user asks questions, and use the latest profile as the context to generate the answer for a newly-asked question. To save tokens, we propose to compress the domain profile into a set of keywords and use the keywords to prompt LLMs. We theoretically analyze the effectiveness of the compression strategy. Experimental results show that our method can generate more personalized answers than comparative methods. The code and dataset are available at https://github.com/DaSESmartEdu/PQA.</abstract>
    <identifier type="citekey">su-etal-2025-personalized</identifier>
    <location>
      <url>https://aclanthology.org/2025.findings-emnlp.255/</url>
    </location>
    <part>
      <date>2025-11</date>
      <extent unit="page">
        <start>4744</start>
        <end>4763</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Personalized Question Answering with User Profile Generation and Compression
%A Su, Hang
%A Yang, Yun
%A Liu, Tianyang
%A Liu, Xin
%A Pu, Peng
%A Lu, Xuesong
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Findings of the Association for Computational Linguistics: EMNLP 2025
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-335-7
%F su-etal-2025-personalized
%X Large language models (LLMs) offer a novel and convenient avenue for humans to acquire knowledge. However, LLMs are prone to providing “midguy” answers regardless of users’ knowledge background, thereby failing to meet each user’s personalized needs. To tackle the problem, we propose to generate personalized answers with LLMs based on users’ past question-answering records. We dynamically generate and update a user’s domain and global profiles as the user asks questions, and use the latest profile as the context to generate the answer for a newly-asked question. To save tokens, we propose to compress the domain profile into a set of keywords and use the keywords to prompt LLMs. We theoretically analyze the effectiveness of the compression strategy. Experimental results show that our method can generate more personalized answers than comparative methods. The code and dataset are available at https://github.com/DaSESmartEdu/PQA.
%U https://aclanthology.org/2025.findings-emnlp.255/
%P 4744-4763
Markdown (Informal)
[Personalized Question Answering with User Profile Generation and Compression](https://aclanthology.org/2025.findings-emnlp.255/) (Su et al., Findings 2025)
ACL
Hang Su, Yun Yang, Tianyang Liu, Xin Liu, Peng Pu, and Xuesong Lu. 2025. Personalized Question Answering with User Profile Generation and Compression. In Findings of the Association for Computational Linguistics: EMNLP 2025, pages 4744–4763, Suzhou, China. Association for Computational Linguistics.