@inproceedings{hausberger-etal-2026-read,
title = "Read Between the Tracks: Exploring {LLM}-driven Intent-based Music Recommendations",
author = "Hausberger, Anna and
J{\'o}s{\'a}r, Petra and
Schedl, Markus",
editor = "Epure, Elena V. and
Oramas, Sergio and
Doh, SeungHeon and
Ramoneda, Pedro and
Kruspe, Anna and
Sordo, Mohamed",
booktitle = "Proceedings of the 4th Workshop on {NLP} for Music and Audio ({NLP}4{M}us{A} 2026)",
month = mar,
year = "2026",
address = "Rabat, Morocco",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2026.nlp4musa-1.7/",
pages = "44--50",
ISBN = "979-8-89176-369-2",
abstract = "This paper evaluates the effectiveness of large language models (LLMs) on the task of context-aware music recommendation, specifically focusing on the alignment of music tracks with a listening intent, in addition to user preferences. We present a preliminary investigation in which five LLMs (variants of Llama, Qwen, and Mistral) are tasked with ranking a candidate set of tracks containing both ground-truth items (associated with specific user-intent pairs) and distractor items (containing user-relevant, intent-relevant, or non-user and non-intent relevant items). Our results show that LLMs rank intent-user-relevant items higher than the distractor items, with ``Llama-3.1-8B-Instruct'' having the best performance (NDCG of $0.32_{0.20}$ vs. $0.20_{0.15}$). We further investigate whether performance differs when mentioning the listening intent explicitly in the prompt vs. implicitly given solely music preferences. Surprisingly, the LLMs achieved the best performance through an implicit indication of intent, versus explicitly adding it to the prompt, with ``Mistral-7B-Instruct-v0.3'' performing the best (NDCG of $0.37_{0.22}$ vs. $0.29_{0.18}$)."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="hausberger-etal-2026-read">
<titleInfo>
<title>Read Between the Tracks: Exploring LLM-driven Intent-based Music Recommendations</title>
</titleInfo>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="family">Hausberger</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Petra</namePart>
<namePart type="family">Jósár</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Markus</namePart>
<namePart type="family">Schedl</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2026-03</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 4th Workshop on NLP for Music and Audio (NLP4MusA 2026)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Elena</namePart>
<namePart type="given">V</namePart>
<namePart type="family">Epure</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sergio</namePart>
<namePart type="family">Oramas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">SeungHeon</namePart>
<namePart type="family">Doh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pedro</namePart>
<namePart type="family">Ramoneda</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="family">Kruspe</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohamed</namePart>
<namePart type="family">Sordo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Rabat, Morocco</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-369-2</identifier>
</relatedItem>
<abstract>This paper evaluates the effectiveness of large language models (LLMs) on the task of context-aware music recommendation, specifically focusing on the alignment of music tracks with a listening intent, in addition to user preferences. We present a preliminary investigation in which five LLMs (variants of Llama, Qwen, and Mistral) are tasked with ranking a candidate set of tracks containing both ground-truth items (associated with specific user-intent pairs) and distractor items (containing user-relevant, intent-relevant, or non-user and non-intent relevant items). Our results show that LLMs rank intent-user-relevant items higher than the distractor items, with “Llama-3.1-8B-Instruct” having the best performance (NDCG of 0.32₀.20 vs. 0.20₀.15). We further investigate whether performance differs when mentioning the listening intent explicitly in the prompt vs. implicitly given solely music preferences. Surprisingly, the LLMs achieved the best performance through an implicit indication of intent, versus explicitly adding it to the prompt, with “Mistral-7B-Instruct-v0.3” performing the best (NDCG of 0.37₀.22 vs. 0.29₀.18).</abstract>
<identifier type="citekey">hausberger-etal-2026-read</identifier>
<location>
<url>https://aclanthology.org/2026.nlp4musa-1.7/</url>
</location>
<part>
<date>2026-03</date>
<extent unit="page">
<start>44</start>
<end>50</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Read Between the Tracks: Exploring LLM-driven Intent-based Music Recommendations
%A Hausberger, Anna
%A Jósár, Petra
%A Schedl, Markus
%Y Epure, Elena V.
%Y Oramas, Sergio
%Y Doh, SeungHeon
%Y Ramoneda, Pedro
%Y Kruspe, Anna
%Y Sordo, Mohamed
%S Proceedings of the 4th Workshop on NLP for Music and Audio (NLP4MusA 2026)
%D 2026
%8 March
%I Association for Computational Linguistics
%C Rabat, Morocco
%@ 979-8-89176-369-2
%F hausberger-etal-2026-read
%X This paper evaluates the effectiveness of large language models (LLMs) on the task of context-aware music recommendation, specifically focusing on the alignment of music tracks with a listening intent, in addition to user preferences. We present a preliminary investigation in which five LLMs (variants of Llama, Qwen, and Mistral) are tasked with ranking a candidate set of tracks containing both ground-truth items (associated with specific user-intent pairs) and distractor items (containing user-relevant, intent-relevant, or non-user and non-intent relevant items). Our results show that LLMs rank intent-user-relevant items higher than the distractor items, with “Llama-3.1-8B-Instruct” having the best performance (NDCG of 0.32₀.20 vs. 0.20₀.15). We further investigate whether performance differs when mentioning the listening intent explicitly in the prompt vs. implicitly given solely music preferences. Surprisingly, the LLMs achieved the best performance through an implicit indication of intent, versus explicitly adding it to the prompt, with “Mistral-7B-Instruct-v0.3” performing the best (NDCG of 0.37₀.22 vs. 0.29₀.18).
%U https://aclanthology.org/2026.nlp4musa-1.7/
%P 44-50
Markdown (Informal)
[Read Between the Tracks: Exploring LLM-driven Intent-based Music Recommendations](https://aclanthology.org/2026.nlp4musa-1.7/) (Hausberger et al., NLP4MusA 2026)
ACL