@inproceedings{biswas-etal-2020-semi,
title = "Semi-supervised acoustic and language model training for {E}nglish-isi{Z}ulu code-switched speech recognition",
author = "Biswas, Astik and
De Wet, Febe and
Van der Westhuizen, Ewald and
Niesler, Thomas",
booktitle = "Proceedings of the The 4th Workshop on Computational Approaches to Code Switching",
month = may,
year = "2020",
address = "Marseille, France",
publisher = "European Language Resources Association",
url = "https://aclanthology.org/2020.calcs-1.7",
pages = "52--56",
abstract = "We present an analysis of semi-supervised acoustic and language model training for English-isiZulu code-switched (CS) ASR using soap opera speech. Approximately 11 hours of untranscribed multilingual speech was transcribed automatically using four bilingual CS transcription systems operating in English-isiZulu, English-isiXhosa, English-Setswana and English-Sesotho. These transcriptions were incorporated into the acoustic and language model training sets. Results showed that the TDNN-F acoustic models benefit from the additional semi-supervised data and that even better performance could be achieved by including additional CNN layers. Using these CNN-TDNN-F acoustic models, a first iteration of semi-supervised training achieved an absolute mixed-language WER reduction of 3.44{\%}, and a further 2.18{\%} after a second iteration. Although the languages in the untranscribed data were unknown, the best results were obtained when all automatically transcribed data was used for training and not just the utterances classified as English-isiZulu. Despite perplexity improvements, the semi-supervised language model was not able to improve the ASR performance.",
language = "English",
ISBN = "979-10-95546-66-5",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="biswas-etal-2020-semi">
<titleInfo>
<title>Semi-supervised acoustic and language model training for English-isiZulu code-switched speech recognition</title>
</titleInfo>
<name type="personal">
<namePart type="given">Astik</namePart>
<namePart type="family">Biswas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Febe</namePart>
<namePart type="family">De Wet</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ewald</namePart>
<namePart type="family">Van der westhuizen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Thomas</namePart>
<namePart type="family">Niesler</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<language>
<languageTerm type="text">English</languageTerm>
<languageTerm type="code" authority="iso639-2b">eng</languageTerm>
</language>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 4th Workshop on Computational Approaches to Code Switching</title>
</titleInfo>
<originInfo>
<publisher>European Language Resources Association</publisher>
<place>
<placeTerm type="text">Marseille, France</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-10-95546-66-5</identifier>
</relatedItem>
<abstract>We present an analysis of semi-supervised acoustic and language model training for English-isiZulu code-switched (CS) ASR using soap opera speech. Approximately 11 hours of untranscribed multilingual speech was transcribed automatically using four bilingual CS transcription systems operating in English-isiZulu, English-isiXhosa, English-Setswana and English-Sesotho. These transcriptions were incorporated into the acoustic and language model training sets. Results showed that the TDNN-F acoustic models benefit from the additional semi-supervised data and that even better performance could be achieved by including additional CNN layers. Using these CNN-TDNN-F acoustic models, a first iteration of semi-supervised training achieved an absolute mixed-language WER reduction of 3.44%, and a further 2.18% after a second iteration. Although the languages in the untranscribed data were unknown, the best results were obtained when all automatically transcribed data was used for training and not just the utterances classified as English-isiZulu. Despite perplexity improvements, the semi-supervised language model was not able to improve the ASR performance.</abstract>
<identifier type="citekey">biswas-etal-2020-semi</identifier>
<location>
<url>https://aclanthology.org/2020.calcs-1.7</url>
</location>
<part>
<date>2020-05</date>
<extent unit="page">
<start>52</start>
<end>56</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Semi-supervised acoustic and language model training for English-isiZulu code-switched speech recognition
%A Biswas, Astik
%A De Wet, Febe
%A Van der Westhuizen, Ewald
%A Niesler, Thomas
%S Proceedings of the 4th Workshop on Computational Approaches to Code Switching
%D 2020
%8 May
%I European Language Resources Association
%C Marseille, France
%@ 979-10-95546-66-5
%G English
%F biswas-etal-2020-semi
%X We present an analysis of semi-supervised acoustic and language model training for English-isiZulu code-switched (CS) ASR using soap opera speech. Approximately 11 hours of untranscribed multilingual speech was transcribed automatically using four bilingual CS transcription systems operating in English-isiZulu, English-isiXhosa, English-Setswana and English-Sesotho. These transcriptions were incorporated into the acoustic and language model training sets. Results showed that the TDNN-F acoustic models benefit from the additional semi-supervised data and that even better performance could be achieved by including additional CNN layers. Using these CNN-TDNN-F acoustic models, a first iteration of semi-supervised training achieved an absolute mixed-language WER reduction of 3.44%, and a further 2.18% after a second iteration. Although the languages in the untranscribed data were unknown, the best results were obtained when all automatically transcribed data was used for training and not just the utterances classified as English-isiZulu. Despite perplexity improvements, the semi-supervised language model was not able to improve the ASR performance.
%U https://aclanthology.org/2020.calcs-1.7
%P 52-56
Markdown (Informal)
[Semi-supervised acoustic and language model training for English-isiZulu code-switched speech recognition](https://aclanthology.org/2020.calcs-1.7) (Biswas et al., CALCS 2020)
ACL
Astik Biswas, Febe De Wet, Ewald Van der Westhuizen, and Thomas Niesler. 2020. [Semi-supervised acoustic and language model training for English-isiZulu code-switched speech recognition](https://aclanthology.org/2020.calcs-1.7). In *Proceedings of the 4th Workshop on Computational Approaches to Code Switching*, pages 52–56, Marseille, France. European Language Resources Association.