@inproceedings{faisal-etal-2021-sd-qa,
title = "{SD}-{QA}: Spoken Dialectal Question Answering for the Real World",
author = "Faisal, Fahim and
Keshava, Sharlina and
Alam, Md Mahfuz Ibn and
Anastasopoulos, Antonios",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2021",
month = nov,
year = "2021",
address = "Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.findings-emnlp.281",
doi = "10.18653/v1/2021.findings-emnlp.281",
pages = "3296--3315",
abstract = "Question answering (QA) systems are now available through numerous commercial applications for a wide variety of domains, serving millions of users that interact with them via speech interfaces. However, current benchmarks in QA research do not account for the errors that speech recognition models might introduce, nor do they consider the language variations (dialects) of the users. To address this gap, we augment an existing QA dataset to construct a multi-dialect, spoken QA benchmark on five languages (Arabic, Bengali, English, Kiswahili, Korean) with more than 68k audio prompts in 24 dialects from 255 speakers. We provide baseline results showcasing the real-world performance of QA systems and analyze the effect of language variety and other sensitive speaker attributes on downstream performance. Last, we study the fairness of the ASR and QA models with respect to the underlying user populations.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="faisal-etal-2021-sd-qa">
    <titleInfo>
      <title>SD-QA: Spoken Dialectal Question Answering for the Real World</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Fahim</namePart>
      <namePart type="family">Faisal</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Sharlina</namePart>
      <namePart type="family">Keshava</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Md</namePart>
      <namePart type="given">Mahfuz</namePart>
      <namePart type="given">Ibn</namePart>
      <namePart type="family">Alam</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Antonios</namePart>
      <namePart type="family">Anastasopoulos</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: EMNLP 2021</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Punta Cana, Dominican Republic</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Question answering (QA) systems are now available through numerous commercial applications for a wide variety of domains, serving millions of users that interact with them via speech interfaces. However, current benchmarks in QA research do not account for the errors that speech recognition models might introduce, nor do they consider the language variations (dialects) of the users. To address this gap, we augment an existing QA dataset to construct a multi-dialect, spoken QA benchmark on five languages (Arabic, Bengali, English, Kiswahili, Korean) with more than 68k audio prompts in 24 dialects from 255 speakers. We provide baseline results showcasing the real-world performance of QA systems and analyze the effect of language variety and other sensitive speaker attributes on downstream performance. Last, we study the fairness of the ASR and QA models with respect to the underlying user populations.</abstract>
    <identifier type="citekey">faisal-etal-2021-sd-qa</identifier>
    <identifier type="doi">10.18653/v1/2021.findings-emnlp.281</identifier>
    <location>
      <url>https://aclanthology.org/2021.findings-emnlp.281</url>
    </location>
    <part>
      <date>2021-11</date>
      <extent unit="page">
        <start>3296</start>
        <end>3315</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T SD-QA: Spoken Dialectal Question Answering for the Real World
%A Faisal, Fahim
%A Keshava, Sharlina
%A Alam, Md Mahfuz Ibn
%A Anastasopoulos, Antonios
%S Findings of the Association for Computational Linguistics: EMNLP 2021
%D 2021
%8 November
%I Association for Computational Linguistics
%C Punta Cana, Dominican Republic
%F faisal-etal-2021-sd-qa
%X Question answering (QA) systems are now available through numerous commercial applications for a wide variety of domains, serving millions of users that interact with them via speech interfaces. However, current benchmarks in QA research do not account for the errors that speech recognition models might introduce, nor do they consider the language variations (dialects) of the users. To address this gap, we augment an existing QA dataset to construct a multi-dialect, spoken QA benchmark on five languages (Arabic, Bengali, English, Kiswahili, Korean) with more than 68k audio prompts in 24 dialects from 255 speakers. We provide baseline results showcasing the real-world performance of QA systems and analyze the effect of language variety and other sensitive speaker attributes on downstream performance. Last, we study the fairness of the ASR and QA models with respect to the underlying user populations.
%R 10.18653/v1/2021.findings-emnlp.281
%U https://aclanthology.org/2021.findings-emnlp.281
%U https://doi.org/10.18653/v1/2021.findings-emnlp.281
%P 3296-3315
Markdown (Informal)
[SD-QA: Spoken Dialectal Question Answering for the Real World](https://aclanthology.org/2021.findings-emnlp.281) (Faisal et al., Findings 2021)

ACL
Fahim Faisal, Sharlina Keshava, Md Mahfuz Ibn Alam, and Antonios Anastasopoulos. 2021. SD-QA: Spoken Dialectal Question Answering for the Real World. In Findings of the Association for Computational Linguistics: EMNLP 2021, pages 3296–3315, Punta Cana, Dominican Republic. Association for Computational Linguistics.