@inproceedings{wang-etal-2024-interpreting,
title = "Interpreting Answers to Yes-No Questions in Dialogues from Multiple Domains",
author = "Wang, Zijie and
Rashid, Farzana and
Blanco, Eduardo",
editor = "Duh, Kevin and
Gomez, Helena and
Bethard, Steven",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2024",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.findings-naacl.136/",
doi = "10.18653/v1/2024.findings-naacl.136",
pages = "2111--2128",
abstract = "People often answer yes-no questions without explicitly saying yes, no, or similar polar key-words. Figuring out the meaning of indirectanswers is challenging, even for large language models. In this paper, we investigate this problem working with dialogues from multiple domains. We present new benchmarks in three diverse domains: movie scripts, tennis interviews, and airline customer service. We present an approach grounded on distant supervision and blended training to quickly adapt to a new dialogue domain. Experimental results show that our approach is never detrimental and yields F1 improvements as high as 11-34{\%}."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="wang-etal-2024-interpreting">
<titleInfo>
<title>Interpreting Answers to Yes-No Questions in Dialogues from Multiple Domains</title>
</titleInfo>
<name type="personal">
<namePart type="given">Zijie</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Farzana</namePart>
<namePart type="family">Rashid</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Eduardo</namePart>
<namePart type="family">Blanco</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: NAACL 2024</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kevin</namePart>
<namePart type="family">Duh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Helena</namePart>
<namePart type="family">Gomez</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Steven</namePart>
<namePart type="family">Bethard</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Mexico City, Mexico</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>People often answer yes-no questions without explicitly saying yes, no, or similar polar keywords. Figuring out the meaning of indirect answers is challenging, even for large language models. In this paper, we investigate this problem working with dialogues from multiple domains. We present new benchmarks in three diverse domains: movie scripts, tennis interviews, and airline customer service. We present an approach grounded on distant supervision and blended training to quickly adapt to a new dialogue domain. Experimental results show that our approach is never detrimental and yields F1 improvements as high as 11-34%.</abstract>
<identifier type="citekey">wang-etal-2024-interpreting</identifier>
<identifier type="doi">10.18653/v1/2024.findings-naacl.136</identifier>
<location>
<url>https://aclanthology.org/2024.findings-naacl.136/</url>
</location>
<part>
<date>2024-06</date>
<extent unit="page">
<start>2111</start>
<end>2128</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Interpreting Answers to Yes-No Questions in Dialogues from Multiple Domains
%A Wang, Zijie
%A Rashid, Farzana
%A Blanco, Eduardo
%Y Duh, Kevin
%Y Gomez, Helena
%Y Bethard, Steven
%S Findings of the Association for Computational Linguistics: NAACL 2024
%D 2024
%8 June
%I Association for Computational Linguistics
%C Mexico City, Mexico
%F wang-etal-2024-interpreting
%X People often answer yes-no questions without explicitly saying yes, no, or similar polar keywords. Figuring out the meaning of indirect answers is challenging, even for large language models. In this paper, we investigate this problem working with dialogues from multiple domains. We present new benchmarks in three diverse domains: movie scripts, tennis interviews, and airline customer service. We present an approach grounded on distant supervision and blended training to quickly adapt to a new dialogue domain. Experimental results show that our approach is never detrimental and yields F1 improvements as high as 11-34%.
%R 10.18653/v1/2024.findings-naacl.136
%U https://aclanthology.org/2024.findings-naacl.136/
%U https://doi.org/10.18653/v1/2024.findings-naacl.136
%P 2111-2128
Markdown (Informal)
[Interpreting Answers to Yes-No Questions in Dialogues from Multiple Domains](https://aclanthology.org/2024.findings-naacl.136/) (Wang et al., Findings 2024)
ACL
Zijie Wang, Farzana Rashid, and Eduardo Blanco. 2024. [Interpreting Answers to Yes-No Questions in Dialogues from Multiple Domains](https://aclanthology.org/2024.findings-naacl.136/). In *Findings of the Association for Computational Linguistics: NAACL 2024*, pages 2111–2128, Mexico City, Mexico. Association for Computational Linguistics.