@inproceedings{mendonca-etal-2023-simple,
title = "Simple {LLM} Prompting is State-of-the-Art for Robust and Multilingual Dialogue Evaluation",
author = "Mendon{\c{c}}a, John and
Pereira, Patr{\'\i}cia and
Moniz, Helena and
Paulo Carvalho, Joao and
Lavie, Alon and
Trancoso, Isabel",
editor = "Chen, Yun-Nung and
Crook, Paul and
Galley, Michel and
Ghazarian, Sarik and
Gunasekara, Chulaka and
Gupta, Raghav and
Hedayatnia, Behnam and
Kottur, Satwik and
Moon, Seungwhan and
Zhang, Chen",
booktitle = "Proceedings of The Eleventh Dialog System Technology Challenge",
month = sep,
year = "2023",
address = "Prague, Czech Republic",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.dstc-1.16",
pages = "133--143",
abstract = "Despite significant research effort in the development of automatic dialogue evaluation metrics, little thought is given to evaluating dialogues other than in English. At the same time, ensuring metrics are invariant to semantically similar responses is also an overlooked topic. In order to achieve the desired properties of robustness and multilinguality for dialogue evaluation metrics, we propose a novel framework that takes advantage of the strengths of current evaluation models with the newly-established paradigm of prompting Large Language Models (LLMs). Empirical results show our framework achieves state of the art results in terms of mean Spearman correlation scores across several benchmarks and ranks first place on both the Robust and Multilingual tasks of the DSTC11 Track 4 {``}Automatic Evaluation Metrics for Open-Domain Dialogue Systems{''}, proving the evaluation capabilities of prompted LLMs.",
}