@inproceedings{aksenova-etal-2021-might,
title = "How Might We Create Better Benchmarks for Speech Recognition?",
author = {Aks{\"e}nova, Al{\"e}na and
van Esch, Daan and
Flynn, James and
Golik, Pavel},
editor = "Church, Kenneth and
Liberman, Mark and
Kordoni, Valia",
booktitle = "Proceedings of the 1st Workshop on Benchmarking: Past, Present and Future",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.bppf-1.4",
doi = "10.18653/v1/2021.bppf-1.4",
pages = "22--34",
abstract = "The applications of automatic speech recognition (ASR) systems are proliferating, in part due to recent significant quality improvements. However, as recent work indicates, even state-of-the-art speech recognition systems {--} some of which deliver impressive benchmark results {--} struggle to generalize across use cases. We review relevant work, and, hoping to inform future benchmark development, outline a taxonomy of speech recognition use cases, proposed for the next generation of ASR benchmarks. We also survey work on metrics, in addition to the de facto standard Word Error Rate (WER) metric, and we introduce a versatile framework designed to describe interactions between linguistic variation and ASR performance metrics.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="aksenova-etal-2021-might">
<titleInfo>
<title>How Might We Create Better Benchmarks for Speech Recognition?</title>
</titleInfo>
<name type="personal">
<namePart type="given">Alëna</namePart>
<namePart type="family">Aksënova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Daan</namePart>
<namePart type="family">van Esch</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">James</namePart>
<namePart type="family">Flynn</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pavel</namePart>
<namePart type="family">Golik</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 1st Workshop on Benchmarking: Past, Present and Future</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kenneth</namePart>
<namePart type="family">Church</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mark</namePart>
<namePart type="family">Liberman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Valia</namePart>
<namePart type="family">Kordoni</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The applications of automatic speech recognition (ASR) systems are proliferating, in part due to recent significant quality improvements. However, as recent work indicates, even state-of-the-art speech recognition systems – some of which deliver impressive benchmark results – struggle to generalize across use cases. We review relevant work, and, hoping to inform future benchmark development, outline a taxonomy of speech recognition use cases, proposed for the next generation of ASR benchmarks. We also survey work on metrics, in addition to the de facto standard Word Error Rate (WER) metric, and we introduce a versatile framework designed to describe interactions between linguistic variation and ASR performance metrics.</abstract>
<identifier type="citekey">aksenova-etal-2021-might</identifier>
<identifier type="doi">10.18653/v1/2021.bppf-1.4</identifier>
<location>
<url>https://aclanthology.org/2021.bppf-1.4</url>
</location>
<part>
<date>2021-08</date>
<extent unit="page">
<start>22</start>
<end>34</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T How Might We Create Better Benchmarks for Speech Recognition?
%A Aksënova, Alëna
%A van Esch, Daan
%A Flynn, James
%A Golik, Pavel
%Y Church, Kenneth
%Y Liberman, Mark
%Y Kordoni, Valia
%S Proceedings of the 1st Workshop on Benchmarking: Past, Present and Future
%D 2021
%8 August
%I Association for Computational Linguistics
%C Online
%F aksenova-etal-2021-might
%X The applications of automatic speech recognition (ASR) systems are proliferating, in part due to recent significant quality improvements. However, as recent work indicates, even state-of-the-art speech recognition systems – some of which deliver impressive benchmark results – struggle to generalize across use cases. We review relevant work, and, hoping to inform future benchmark development, outline a taxonomy of speech recognition use cases, proposed for the next generation of ASR benchmarks. We also survey work on metrics, in addition to the de facto standard Word Error Rate (WER) metric, and we introduce a versatile framework designed to describe interactions between linguistic variation and ASR performance metrics.
%R 10.18653/v1/2021.bppf-1.4
%U https://aclanthology.org/2021.bppf-1.4
%U https://doi.org/10.18653/v1/2021.bppf-1.4
%P 22-34
Markdown (Informal)
[How Might We Create Better Benchmarks for Speech Recognition?](https://aclanthology.org/2021.bppf-1.4) (Aksënova et al., BPPF 2021)
ACL