@inproceedings{aliannejadi-etal-2021-building,
  title     = {Building and Evaluating Open-Domain Dialogue Corpora with Clarifying Questions},
  author    = {Aliannejadi, Mohammad and
               Kiseleva, Julia and
               Chuklin, Aleksandr and
               Dalton, Jeff and
               Burtsev, Mikhail},
  booktitle = {Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing},
  month     = nov,
  year      = {2021},
  address   = {Online and Punta Cana, Dominican Republic},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2021.emnlp-main.367},
  doi       = {10.18653/v1/2021.emnlp-main.367},
  pages     = {4473--4484},
  abstract  = {Enabling open-domain dialogue systems to ask clarifying questions when appropriate is an important direction for improving the quality of the system response. Namely, for cases when a user request is not specific enough for a conversation system to provide an answer right away, it is desirable to ask a clarifying question to increase the chances of retrieving a satisfying answer. To address the problem of {`}asking clarifying questions in open-domain dialogues{'}: (1) we collect and release a new dataset focused on open-domain single- and multi-turn conversations, (2) we benchmark several state-of-the-art neural baselines, and (3) we propose a pipeline consisting of offline and online steps for evaluating the quality of clarifying questions in various dialogues. These contributions are suitable as a foundation for further research.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="aliannejadi-etal-2021-building">
<titleInfo>
<title>Building and Evaluating Open-Domain Dialogue Corpora with Clarifying Questions</title>
</titleInfo>
<name type="personal">
<namePart type="given">Mohammad</namePart>
<namePart type="family">Aliannejadi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Julia</namePart>
<namePart type="family">Kiseleva</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aleksandr</namePart>
<namePart type="family">Chuklin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jeff</namePart>
<namePart type="family">Dalton</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mikhail</namePart>
<namePart type="family">Burtsev</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online and Punta Cana, Dominican Republic</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Enabling open-domain dialogue systems to ask clarifying questions when appropriate is an important direction for improving the quality of the system response. Namely, for cases when a user request is not specific enough for a conversation system to provide an answer right away, it is desirable to ask a clarifying question to increase the chances of retrieving a satisfying answer. To address the problem of ‘asking clarifying questions in open-domain dialogues’: (1) we collect and release a new dataset focused on open-domain single- and multi-turn conversations, (2) we benchmark several state-of-the-art neural baselines, and (3) we propose a pipeline consisting of offline and online steps for evaluating the quality of clarifying questions in various dialogues. These contributions are suitable as a foundation for further research.</abstract>
<identifier type="citekey">aliannejadi-etal-2021-building</identifier>
<identifier type="doi">10.18653/v1/2021.emnlp-main.367</identifier>
<location>
<url>https://aclanthology.org/2021.emnlp-main.367</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>4473</start>
<end>4484</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Building and Evaluating Open-Domain Dialogue Corpora with Clarifying Questions
%A Aliannejadi, Mohammad
%A Kiseleva, Julia
%A Chuklin, Aleksandr
%A Dalton, Jeff
%A Burtsev, Mikhail
%S Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing
%D 2021
%8 November
%I Association for Computational Linguistics
%C Online and Punta Cana, Dominican Republic
%F aliannejadi-etal-2021-building
%X Enabling open-domain dialogue systems to ask clarifying questions when appropriate is an important direction for improving the quality of the system response. Namely, for cases when a user request is not specific enough for a conversation system to provide an answer right away, it is desirable to ask a clarifying question to increase the chances of retrieving a satisfying answer. To address the problem of ‘asking clarifying questions in open-domain dialogues’: (1) we collect and release a new dataset focused on open-domain single- and multi-turn conversations, (2) we benchmark several state-of-the-art neural baselines, and (3) we propose a pipeline consisting of offline and online steps for evaluating the quality of clarifying questions in various dialogues. These contributions are suitable as a foundation for further research.
%R 10.18653/v1/2021.emnlp-main.367
%U https://aclanthology.org/2021.emnlp-main.367
%U https://doi.org/10.18653/v1/2021.emnlp-main.367
%P 4473-4484
Markdown (Informal)
[Building and Evaluating Open-Domain Dialogue Corpora with Clarifying Questions](https://aclanthology.org/2021.emnlp-main.367) (Aliannejadi et al., EMNLP 2021)
ACL