@inproceedings{steindl-etal-2025-improved,
title = "An Improved, Strong Baseline for Pre-Trained Large Language Models as Task-Oriented Dialogue Systems",
author = {Steindl, Sebastian and
Kestler, Andr{\'e} and
Sch{\"a}fer, Ulrich and
Ludwig, Bernd},
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-emnlp.610/",
doi = "10.18653/v1/2025.findings-emnlp.610",
pages = "11388--11398",
ISBN = "979-8-89176-335-7",
abstract = "Large Language Models (LLMs) have recently been studied within the context of Task-Oriented Dialogues (TOD). However, previous research is inconclusive on their effectiveness, with some studies claiming that LLMs are unable to perform the TOD task and others making sophisticated additions to their setup and coming to opposite conclusions. In this work, we take a detailed look at previous results that state LLMs perform insufficiently as a TOD system. As a result, we propose an updated, stronger baseline for multiple out-of-the-box LLM performances as TOD systems. We introduce a Self-Checking mechanism as a simple, yet effective, component to drastically improve their performance. Our results show that newer, pre-trained LLMs can, in fact, perform as TOD systems out-of-the-box, challenging the previous understanding. We show that LLMs can even perform competitively to fine-tuned models in certain metrics. Based on this, we propose directions for future research. Our code is published on Github."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="steindl-etal-2025-improved">
<titleInfo>
<title>An Improved, Strong Baseline for Pre-Trained Large Language Models as Task-Oriented Dialogue Systems</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sebastian</namePart>
<namePart type="family">Steindl</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">André</namePart>
<namePart type="family">Kestler</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ulrich</namePart>
<namePart type="family">Schäfer</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bernd</namePart>
<namePart type="family">Ludwig</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Christos</namePart>
<namePart type="family">Christodoulopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tanmoy</namePart>
<namePart type="family">Chakraborty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolyn</namePart>
<namePart type="family">Rose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Violet</namePart>
<namePart type="family">Peng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-335-7</identifier>
</relatedItem>
<abstract>Large Language Models (LLMs) have recently been studied within the context of Task-Oriented Dialogues (TOD). However, previous research is inconclusive on their effectiveness, with some studies claiming that LLMs are unable to perform the TOD task and others making sophisticated additions to their setup and coming to opposite conclusions. In this work, we take a detailed look at previous results that state LLMs perform insufficiently as a TOD system. As a result, we propose an updated, stronger baseline for multiple out-of-the-box LLM performances as TOD systems. We introduce a Self-Checking mechanism as a simple, yet effective, component to drastically improve their performance. Our results show that newer, pre-trained LLMs can, in fact, perform as TOD systems out-of-the-box, challenging the previous understanding. We show that LLMs can even perform competitively to fine-tuned models in certain metrics. Based on this, we propose directions for future research. Our code is published on Github.</abstract>
<identifier type="citekey">steindl-etal-2025-improved</identifier>
<identifier type="doi">10.18653/v1/2025.findings-emnlp.610</identifier>
<location>
<url>https://aclanthology.org/2025.findings-emnlp.610/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>11388</start>
<end>11398</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T An Improved, Strong Baseline for Pre-Trained Large Language Models as Task-Oriented Dialogue Systems
%A Steindl, Sebastian
%A Kestler, André
%A Schäfer, Ulrich
%A Ludwig, Bernd
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Findings of the Association for Computational Linguistics: EMNLP 2025
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-335-7
%F steindl-etal-2025-improved
%X Large Language Models (LLMs) have recently been studied within the context of Task-Oriented Dialogues (TOD). However, previous research is inconclusive on their effectiveness, with some studies claiming that LLMs are unable to perform the TOD task and others making sophisticated additions to their setup and coming to opposite conclusions. In this work, we take a detailed look at previous results that state LLMs perform insufficiently as a TOD system. As a result, we propose an updated, stronger baseline for multiple out-of-the-box LLM performances as TOD systems. We introduce a Self-Checking mechanism as a simple, yet effective, component to drastically improve their performance. Our results show that newer, pre-trained LLMs can, in fact, perform as TOD systems out-of-the-box, challenging the previous understanding. We show that LLMs can even perform competitively to fine-tuned models in certain metrics. Based on this, we propose directions for future research. Our code is published on Github.
%R 10.18653/v1/2025.findings-emnlp.610
%U https://aclanthology.org/2025.findings-emnlp.610/
%U https://doi.org/10.18653/v1/2025.findings-emnlp.610
%P 11388-11398
Markdown (Informal)
[An Improved, Strong Baseline for Pre-Trained Large Language Models as Task-Oriented Dialogue Systems](https://aclanthology.org/2025.findings-emnlp.610/) (Steindl et al., Findings 2025)
ACL
Sebastian Steindl, André Kestler, Ulrich Schäfer, and Bernd Ludwig. 2025. An Improved, Strong Baseline for Pre-Trained Large Language Models as Task-Oriented Dialogue Systems. In Findings of the Association for Computational Linguistics: EMNLP 2025, pages 11388–11398, Suzhou, China. Association for Computational Linguistics.