@inproceedings{doddapaneni-etal-2024-user,
title = "User Embedding Model for Personalized Language Prompting",
author = "Doddapaneni, Sumanth and
Sayana, Krishna and
Jash, Ambarish and
Sodhi, Sukhdeep and
Kuzmin, Dima",
editor = "Deshpande, Ameet and
Hwang, EunJeong and
Murahari, Vishvak and
Park, Joon Sung and
Yang, Diyi and
Sabharwal, Ashish and
Narasimhan, Karthik and
Kalyan, Ashwin",
booktitle = "Proceedings of the 1st Workshop on Personalization of Generative AI Systems (PERSONALIZE 2024)",
month = mar,
year = "2024",
    address = "St. Julian's, Malta",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.personalize-1.12/",
pages = "124--131",
abstract = "Modeling long user histories plays a pivotal role in enhancing recommendation systems, allowing to capture users' evolving preferences, resulting in more precise and personalized recommendations. In this study, we tackle the challenges of modeling long user histories for preference understanding in natural language. Specifically, we introduce a new User Embedding Module (UEM) that efficiently processes user history in free-form text by compressing and representing them as embeddings, to use them as soft prompts to a language model (LM). Our experiments demonstrate the superior capability of this approach in handling significantly longer histories compared to conventional text-based methods, yielding substantial improvements in predictive performance. Models trained using our approach exhibit substantial enhancements, with up to 0.21 and 0.25 F1 points improvement over the text-based prompting baselines. The main contribution of this research is to demonstrate the ability to bias language models via user signals."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="doddapaneni-etal-2024-user">
<titleInfo>
<title>User Embedding Model for Personalized Language Prompting</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sumanth</namePart>
<namePart type="family">Doddapaneni</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Krishna</namePart>
<namePart type="family">Sayana</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ambarish</namePart>
<namePart type="family">Jash</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sukhdeep</namePart>
<namePart type="family">Sodhi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dima</namePart>
<namePart type="family">Kuzmin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-03</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 1st Workshop on Personalization of Generative AI Systems (PERSONALIZE 2024)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ameet</namePart>
<namePart type="family">Deshpande</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">EunJeong</namePart>
<namePart type="family">Hwang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vishvak</namePart>
<namePart type="family">Murahari</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Joon</namePart>
<namePart type="given">Sung</namePart>
<namePart type="family">Park</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Diyi</namePart>
<namePart type="family">Yang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ashish</namePart>
<namePart type="family">Sabharwal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Karthik</namePart>
<namePart type="family">Narasimhan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ashwin</namePart>
<namePart type="family">Kalyan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">St. Julian&#8217;s, Malta</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Modeling long user histories plays a pivotal role in enhancing recommendation systems, allowing to capture users’ evolving preferences, resulting in more precise and personalized recommendations. In this study, we tackle the challenges of modeling long user histories for preference understanding in natural language. Specifically, we introduce a new User Embedding Module (UEM) that efficiently processes user history in free-form text by compressing and representing them as embeddings, to use them as soft prompts to a language model (LM). Our experiments demonstrate the superior capability of this approach in handling significantly longer histories compared to conventional text-based methods, yielding substantial improvements in predictive performance. Models trained using our approach exhibit substantial enhancements, with up to 0.21 and 0.25 F1 points improvement over the text-based prompting baselines. The main contribution of this research is to demonstrate the ability to bias language models via user signals.</abstract>
<identifier type="citekey">doddapaneni-etal-2024-user</identifier>
<location>
<url>https://aclanthology.org/2024.personalize-1.12/</url>
</location>
<part>
<date>2024-03</date>
<extent unit="page">
<start>124</start>
<end>131</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T User Embedding Model for Personalized Language Prompting
%A Doddapaneni, Sumanth
%A Sayana, Krishna
%A Jash, Ambarish
%A Sodhi, Sukhdeep
%A Kuzmin, Dima
%Y Deshpande, Ameet
%Y Hwang, EunJeong
%Y Murahari, Vishvak
%Y Park, Joon Sung
%Y Yang, Diyi
%Y Sabharwal, Ashish
%Y Narasimhan, Karthik
%Y Kalyan, Ashwin
%S Proceedings of the 1st Workshop on Personalization of Generative AI Systems (PERSONALIZE 2024)
%D 2024
%8 March
%I Association for Computational Linguistics
%C St. Julian&#8217;s, Malta
%F doddapaneni-etal-2024-user
%X Modeling long user histories plays a pivotal role in enhancing recommendation systems, allowing to capture users’ evolving preferences, resulting in more precise and personalized recommendations. In this study, we tackle the challenges of modeling long user histories for preference understanding in natural language. Specifically, we introduce a new User Embedding Module (UEM) that efficiently processes user history in free-form text by compressing and representing them as embeddings, to use them as soft prompts to a language model (LM). Our experiments demonstrate the superior capability of this approach in handling significantly longer histories compared to conventional text-based methods, yielding substantial improvements in predictive performance. Models trained using our approach exhibit substantial enhancements, with up to 0.21 and 0.25 F1 points improvement over the text-based prompting baselines. The main contribution of this research is to demonstrate the ability to bias language models via user signals.
%U https://aclanthology.org/2024.personalize-1.12/
%P 124-131
Markdown (Informal)
[User Embedding Model for Personalized Language Prompting](https://aclanthology.org/2024.personalize-1.12/) (Doddapaneni et al., PERSONALIZE 2024)
ACL
- Sumanth Doddapaneni, Krishna Sayana, Ambarish Jash, Sukhdeep Sodhi, and Dima Kuzmin. 2024. User Embedding Model for Personalized Language Prompting. In Proceedings of the 1st Workshop on Personalization of Generative AI Systems (PERSONALIZE 2024), pages 124–131, St. Julian&#8217;s, Malta. Association for Computational Linguistics.