BibTeX
@inproceedings{elizabeth-etal-2025-neural,
    title = "Neural Models and Language Model Prompting for the Multidimensional Evaluation of Open-Ended Conversations",
    author = "Elizabeth, Michelle  and
      Kasicka, Alicja  and
      Krawczyk, Natalia  and
      Ochs, Magalie  and
      Lecorv{\'e}, Gw{\'e}nol{\'e}  and
      Gromada, Justyna  and
      Rojas-Barahona, Lina M.",
    editor = "Hedayatnia, Behnam  and
      Chen, Vivian  and
      Chen, Zhang  and
      Gupta, Raghav  and
      Galley, Michel",
    booktitle = "Proceedings of the Twelfth Dialog System Technology Challenge",
    month = aug,
    year = "2025",
    address = "Avignon, France",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.dstc-1.1/",
    pages = "1--16",
    ISBN = "979-8-89176-330-2",
    abstract = "The growing number of generative AI-based dialogue systems has made their evaluation a crucial challenge. This paper presents our contribution to this important problem through the Dialogue System Technology Challenge (DSTC-12, Track 1), where we developed models to predict dialogue-level, dimension-specific scores. Given the constraint of using relatively small models (i.e., fewer than 13 billion parameters), our work follows two main strategies: employing Language Models (LMs) as evaluators through prompting, and training encoder-based classification and regression models. Our results show that while LM prompting achieves only modest correlations with human judgments, it still ranks second on the test set, outperformed only by the baseline. The regression and classification models, with significantly fewer parameters, demonstrate high correlation for some dimensions on the validation set. Although their performance decreases on the test set, it is important to note that the test set contains annotations with significantly different score ranges for some dimensions with respect to the training and validation sets."
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="elizabeth-etal-2025-neural">
    <titleInfo>
        <title>Neural Models and Language Model Prompting for the Multidimensional Evaluation of Open-Ended Conversations</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Michelle</namePart>
        <namePart type="family">Elizabeth</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Alicja</namePart>
        <namePart type="family">Kasicka</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Natalia</namePart>
        <namePart type="family">Krawczyk</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Magalie</namePart>
        <namePart type="family">Ochs</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Gwénolé</namePart>
        <namePart type="family">Lecorvé</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Justyna</namePart>
        <namePart type="family">Gromada</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Lina</namePart>
        <namePart type="given">M</namePart>
        <namePart type="family">Rojas-Barahona</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2025-08</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the Twelfth Dialog System Technology Challenge</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Behnam</namePart>
            <namePart type="family">Hedayatnia</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Vivian</namePart>
            <namePart type="family">Chen</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Zhang</namePart>
            <namePart type="family">Chen</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Raghav</namePart>
            <namePart type="family">Gupta</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Michel</namePart>
            <namePart type="family">Galley</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Avignon, France</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
        <identifier type="isbn">979-8-89176-330-2</identifier>
    </relatedItem>
    <abstract>The growing number of generative AI-based dialogue systems has made their evaluation a crucial challenge. This paper presents our contribution to this important problem through the Dialogue System Technology Challenge (DSTC-12, Track 1), where we developed models to predict dialogue-level, dimension-specific scores. Given the constraint of using relatively small models (i.e., fewer than 13 billion parameters), our work follows two main strategies: employing Language Models (LMs) as evaluators through prompting, and training encoder-based classification and regression models. Our results show that while LM prompting achieves only modest correlations with human judgments, it still ranks second on the test set, outperformed only by the baseline. The regression and classification models, with significantly fewer parameters, demonstrate high correlation for some dimensions on the validation set. Although their performance decreases on the test set, it is important to note that the test set contains annotations with significantly different score ranges for some dimensions with respect to the training and validation sets.</abstract>
<identifier type="citekey">elizabeth-etal-2025-neural</identifier>
<location>
<url>https://aclanthology.org/2025.dstc-1.1/</url>
</location>
<part>
<date>2025-08</date>
<extent unit="page">
<start>1</start>
<end>16</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Neural Models and Language Model Prompting for the Multidimensional Evaluation of Open-Ended Conversations
%A Elizabeth, Michelle
%A Kasicka, Alicja
%A Krawczyk, Natalia
%A Ochs, Magalie
%A Lecorvé, Gwénolé
%A Gromada, Justyna
%A Rojas-Barahona, Lina M.
%Y Hedayatnia, Behnam
%Y Chen, Vivian
%Y Chen, Zhang
%Y Gupta, Raghav
%Y Galley, Michel
%S Proceedings of the Twelfth Dialog System Technology Challenge
%D 2025
%8 August
%I Association for Computational Linguistics
%C Avignon, France
%@ 979-8-89176-330-2
%F elizabeth-etal-2025-neural
%X The growing number of generative AI-based dialogue systems has made their evaluation a crucial challenge. This paper presents our contribution to this important problem through the Dialogue System Technology Challenge (DSTC-12, Track 1), where we developed models to predict dialogue-level, dimension-specific scores. Given the constraint of using relatively small models (i.e., fewer than 13 billion parameters), our work follows two main strategies: employing Language Models (LMs) as evaluators through prompting, and training encoder-based classification and regression models. Our results show that while LM prompting achieves only modest correlations with human judgments, it still ranks second on the test set, outperformed only by the baseline. The regression and classification models, with significantly fewer parameters, demonstrate high correlation for some dimensions on the validation set. Although their performance decreases on the test set, it is important to note that the test set contains annotations with significantly different score ranges for some dimensions with respect to the training and validation sets.
%U https://aclanthology.org/2025.dstc-1.1/
%P 1-16
Markdown (Informal)
[Neural Models and Language Model Prompting for the Multidimensional Evaluation of Open-Ended Conversations](https://aclanthology.org/2025.dstc-1.1/) (Elizabeth et al., DSTC 2025)
ACL
Michelle Elizabeth, Alicja Kasicka, Natalia Krawczyk, Magalie Ochs, Gwénolé Lecorvé, Justyna Gromada, and Lina M. Rojas-Barahona. 2025. [Neural Models and Language Model Prompting for the Multidimensional Evaluation of Open-Ended Conversations](https://aclanthology.org/2025.dstc-1.1/). In *Proceedings of the Twelfth Dialog System Technology Challenge*, pages 1–16, Avignon, France. Association for Computational Linguistics.