@inproceedings{papadopoulou-etal-2021-benchmarking,
title = "Benchmarking {ASR} Systems Based on Post-Editing Effort and Error Analysis",
author = "Papadopoulou, Martha Maria and
Zaretskaya, Anna and
Mitkov, Ruslan",
editor = "Mitkov, Ruslan and
Sosoni, Vilelmini and
Gigu{\`e}re, Julie Christine and
Murgolo, Elena and
Deysel, Elizabeth",
booktitle = "Proceedings of the Translation and Interpreting Technology Online Conference",
month = jul,
year = "2021",
address = "Held Online",
publisher = "INCOMA Ltd.",
url = "https://aclanthology.org/2021.triton-1.23",
pages = "199--207",
abstract = "This paper offers a comparative evaluation of four commercial ASR systems which are evaluated according to the post-editing effort required to reach {``}publishable{''} quality and according to the number of errors they produce. For the error annotation task, an original error typology for transcription errors is proposed. This study also seeks to examine whether there is a difference in the performance of these systems between native and non-native English speakers. The experimental results suggest that among the four systems, Trint obtains the best scores. It is also observed that most systems perform noticeably better with native speakers and that all systems are most prone to fluency errors.",
}
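If the BibTeX record above needs to be consumed programmatically, the sketch below shows one way to do it. It assumes the third-party bibtexparser package (v1.x API) and a hypothetical filename; none of this comes from the paper itself.

```python
# Minimal sketch: read the BibTeX record above with bibtexparser (v1.x).
# Assumes `pip install bibtexparser` and that the entry was saved to the
# hypothetical file papadopoulou2021.bib.
import bibtexparser

with open("papadopoulou2021.bib") as f:
    db = bibtexparser.load(f)

entry = db.entries[0]   # each entry is exposed as a plain dict
print(entry["ID"])      # papadopoulou-etal-2021-benchmarking
print(entry["title"])   # Benchmarking {ASR} Systems Based on ...
print(entry["pages"])   # 199--207
```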
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="papadopoulou-etal-2021-benchmarking">
    <titleInfo>
      <title>Benchmarking ASR Systems Based on Post-Editing Effort and Error Analysis</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Martha</namePart>
      <namePart type="given">Maria</namePart>
      <namePart type="family">Papadopoulou</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Anna</namePart>
      <namePart type="family">Zaretskaya</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ruslan</namePart>
      <namePart type="family">Mitkov</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Translation and Interpreting Technology Online Conference</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Ruslan</namePart>
        <namePart type="family">Mitkov</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Vilelmini</namePart>
        <namePart type="family">Sosoni</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Julie</namePart>
        <namePart type="given">Christine</namePart>
        <namePart type="family">Giguère</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Elena</namePart>
        <namePart type="family">Murgolo</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Elizabeth</namePart>
        <namePart type="family">Deysel</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>INCOMA Ltd.</publisher>
        <place>
          <placeTerm type="text">Held Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>This paper offers a comparative evaluation of four commercial ASR systems which are evaluated according to the post-editing effort required to reach “publishable” quality and according to the number of errors they produce. For the error annotation task, an original error typology for transcription errors is proposed. This study also seeks to examine whether there is a difference in the performance of these systems between native and non-native English speakers. The experimental results suggest that among the four systems, Trint obtains the best scores. It is also observed that most systems perform noticeably better with native speakers and that all systems are most prone to fluency errors.</abstract>
    <identifier type="citekey">papadopoulou-etal-2021-benchmarking</identifier>
    <location>
      <url>https://aclanthology.org/2021.triton-1.23</url>
    </location>
    <part>
      <date>2021-07</date>
      <extent unit="page">
        <start>199</start>
        <end>207</end>
      </extent>
    </part>
  </mods>
</modsCollection>
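A comparable sketch for the MODS record above, using only the Python standard library. The filename is an assumption; the namespace URI is the one declared on the modsCollection element.

```python
# Minimal sketch: pull a few fields out of the MODS XML record above
# with xml.etree.ElementTree. Assumes the record was saved to the
# hypothetical file papadopoulou2021.xml.
import xml.etree.ElementTree as ET

NS = {"m": "http://www.loc.gov/mods/v3"}  # default namespace of the record

tree = ET.parse("papadopoulou2021.xml")
mods = tree.getroot().find("m:mods", NS)

title = mods.find("m:titleInfo/m:title", NS).text
authors = [
    " ".join(part.text for part in name.findall("m:namePart", NS))
    for name in mods.findall("m:name", NS)  # direct children: the authors
    if name.find("m:role/m:roleTerm", NS).text == "author"
]
extent = mods.find("m:part/m:extent", NS)

print(title)
print(authors)  # ['Martha Maria Papadopoulou', 'Anna Zaretskaya', 'Ruslan Mitkov']
print(extent.find("m:start", NS).text, "-", extent.find("m:end", NS).text)
```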
%0 Conference Proceedings
%T Benchmarking ASR Systems Based on Post-Editing Effort and Error Analysis
%A Papadopoulou, Martha Maria
%A Zaretskaya, Anna
%A Mitkov, Ruslan
%Y Mitkov, Ruslan
%Y Sosoni, Vilelmini
%Y Giguère, Julie Christine
%Y Murgolo, Elena
%Y Deysel, Elizabeth
%S Proceedings of the Translation and Interpreting Technology Online Conference
%D 2021
%8 July
%I INCOMA Ltd.
%C Held Online
%F papadopoulou-etal-2021-benchmarking
%X This paper offers a comparative evaluation of four commercial ASR systems which are evaluated according to the post-editing effort required to reach “publishable” quality and according to the number of errors they produce. For the error annotation task, an original error typology for transcription errors is proposed. This study also seeks to examine whether there is a difference in the performance of these systems between native and non-native English speakers. The experimental results suggest that among the four systems, Trint obtains the best scores. It is also observed that most systems perform noticeably better with native speakers and that all systems are most prone to fluency errors.
%U https://aclanthology.org/2021.triton-1.23
%P 199-207
Markdown (Informal)
[Benchmarking ASR Systems Based on Post-Editing Effort and Error Analysis](https://aclanthology.org/2021.triton-1.23) (Papadopoulou et al., TRITON 2021)
ACL
Martha Maria Papadopoulou, Anna Zaretskaya, and Ruslan Mitkov. 2021. Benchmarking ASR Systems Based on Post-Editing Effort and Error Analysis. In Proceedings of the Translation and Interpreting Technology Online Conference, pages 199–207, Held Online. INCOMA Ltd.