@inproceedings{mourya-etal-2026-direct,
title = "{DIRECT}: Directional Relevance in Conversational Trajectories",
author = "Mourya, Anshuman and
Mukherjee, Rajdeep and
Jolly, Prerna and
Puranik, Vinayak S and
Kaveri, Sivaramakrishnan R",
editor = {Matusevych, Yevgen and
Eryi{\u{g}}it, G{\"u}l{\c{s}}en and
Aletras, Nikolaos},
booktitle = "Proceedings of the 19th Conference of the {E}uropean Chapter of the {A}ssociation for {C}omputational {L}inguistics (Volume 5: Industry Track)",
month = mar,
year = "2026",
address = "Rabat, Morocco",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2026.eacl-industry.71/",
pages = "948--957",
ISBN = "979-8-89176-384-5",
abstract = "Conversational Agents have become ubiquitous across application domains, such as, shopping assistants, medical diagnosis, autonomous task planning etc. Users interacting with these agents often fail to understand how to start a conversation or what to ask next to obtain the desired information. To enable seamless and hassle-free user-agent interactions, we introduce Next Question Suggestions (NQS), which are essentially highly relevant follow-up question recommendations that act as conversation starters or discover-ability tools to capture non-trivial user intents, leading to more engaging conversations. Relying on LLMs for both response as well as NQS generation is a costly ask in latency-constrained commercial settings, with an added risk of handling potentially unsafe or unanswerable generated queries. A key component of building an efficient low-latency NQS experience is, therefore, retrieval (or embedding) models that fetch the most-relevant candidate questions from an offline pre-curated Question Bank (QB). Off-the-shelf embedding models cannot capture domain-specific nuances and more importantly the directionality inherent in follow-up question recommendations. In this work, we propose an end-to-end retrieval system, DIRECT that is optimized to model directional relevance. Given a user query, it produces a ranked list of highly relevant follow-up question recommendations within 1 sec. Our system also contains an LLM-as-a-judge component, tuned on proprietary user-agent interaction logs, to evaluate the end-to-end performance in terms of CTR."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="mourya-etal-2026-direct">
<titleInfo>
<title>DIRECT: Directional Relevance in Conversational Trajectories</title>
</titleInfo>
<name type="personal">
<namePart type="given">Anshuman</namePart>
<namePart type="family">Mourya</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rajdeep</namePart>
<namePart type="family">Mukherjee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Prerna</namePart>
<namePart type="family">Jolly</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vinayak</namePart>
<namePart type="given">S</namePart>
<namePart type="family">Puranik</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sivaramakrishnan</namePart>
<namePart type="given">R</namePart>
<namePart type="family">Kaveri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2026-03</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 19th Conference of the European Chapter of the Association for Computational Linguistics (Volume 5: Industry Track)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yevgen</namePart>
<namePart type="family">Matusevych</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Gülşen</namePart>
<namePart type="family">Eryiğit</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nikolaos</namePart>
<namePart type="family">Aletras</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Rabat, Morocco</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-384-5</identifier>
</relatedItem>
<abstract>Conversational Agents have become ubiquitous across application domains, such as, shopping assistants, medical diagnosis, autonomous task planning etc. Users interacting with these agents often fail to understand how to start a conversation or what to ask next to obtain the desired information. To enable seamless and hassle-free user-agent interactions, we introduce Next Question Suggestions (NQS), which are essentially highly relevant follow-up question recommendations that act as conversation starters or discover-ability tools to capture non-trivial user intents, leading to more engaging conversations. Relying on LLMs for both response as well as NQS generation is a costly ask in latency-constrained commercial settings, with an added risk of handling potentially unsafe or unanswerable generated queries. A key component of building an efficient low-latency NQS experience is, therefore, retrieval (or embedding) models that fetch the most-relevant candidate questions from an offline pre-curated Question Bank (QB). Off-the-shelf embedding models cannot capture domain-specific nuances and more importantly the directionality inherent in follow-up question recommendations. In this work, we propose an end-to-end retrieval system, DIRECT that is optimized to model directional relevance. Given a user query, it produces a ranked list of highly relevant follow-up question recommendations within 1 sec. Our system also contains an LLM-as-a-judge component, tuned on proprietary user-agent interaction logs, to evaluate the end-to-end performance in terms of CTR.</abstract>
<identifier type="citekey">mourya-etal-2026-direct</identifier>
<location>
<url>https://aclanthology.org/2026.eacl-industry.71/</url>
</location>
<part>
<date>2026-03</date>
<extent unit="page">
<start>948</start>
<end>957</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T DIRECT: Directional Relevance in Conversational Trajectories
%A Mourya, Anshuman
%A Mukherjee, Rajdeep
%A Jolly, Prerna
%A Puranik, Vinayak S.
%A Kaveri, Sivaramakrishnan R.
%Y Matusevych, Yevgen
%Y Eryiğit, Gülşen
%Y Aletras, Nikolaos
%S Proceedings of the 19th Conference of the European Chapter of the Association for Computational Linguistics (Volume 5: Industry Track)
%D 2026
%8 March
%I Association for Computational Linguistics
%C Rabat, Morocco
%@ 979-8-89176-384-5
%F mourya-etal-2026-direct
%X Conversational Agents have become ubiquitous across application domains, such as, shopping assistants, medical diagnosis, autonomous task planning etc. Users interacting with these agents often fail to understand how to start a conversation or what to ask next to obtain the desired information. To enable seamless and hassle-free user-agent interactions, we introduce Next Question Suggestions (NQS), which are essentially highly relevant follow-up question recommendations that act as conversation starters or discover-ability tools to capture non-trivial user intents, leading to more engaging conversations. Relying on LLMs for both response as well as NQS generation is a costly ask in latency-constrained commercial settings, with an added risk of handling potentially unsafe or unanswerable generated queries. A key component of building an efficient low-latency NQS experience is, therefore, retrieval (or embedding) models that fetch the most-relevant candidate questions from an offline pre-curated Question Bank (QB). Off-the-shelf embedding models cannot capture domain-specific nuances and more importantly the directionality inherent in follow-up question recommendations. In this work, we propose an end-to-end retrieval system, DIRECT that is optimized to model directional relevance. Given a user query, it produces a ranked list of highly relevant follow-up question recommendations within 1 sec. Our system also contains an LLM-as-a-judge component, tuned on proprietary user-agent interaction logs, to evaluate the end-to-end performance in terms of CTR.
%U https://aclanthology.org/2026.eacl-industry.71/
%P 948-957
Markdown (Informal)
[DIRECT: Directional Relevance in Conversational Trajectories](https://aclanthology.org/2026.eacl-industry.71/) (Mourya et al., EACL 2026)
ACL
- Anshuman Mourya, Rajdeep Mukherjee, Prerna Jolly, Vinayak S Puranik, and Sivaramakrishnan R Kaveri. 2026. DIRECT: Directional Relevance in Conversational Trajectories. In Proceedings of the 19th Conference of the European Chapter of the Association for Computational Linguistics (Volume 5: Industry Track), pages 948–957, Rabat, Morocco. Association for Computational Linguistics.