@inproceedings{singla-etal-2023-e2e,
title = "{E}2{E} Spoken Entity Extraction for Virtual Agents",
author = "Singla, Karan and
Kim, Yeon-Jun and
Bangalore, Srinivas",
editor = "Wang, Mingxuan and
Zitouni, Imed",
booktitle = "Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Industry Track",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.emnlp-industry.54",
doi = "10.18653/v1/2023.emnlp-industry.54",
pages = "567--574",
abstract = "In human-computer conversations, extracting entities such as names, street addresses and email addresses from speech is a challenging task. In this paper, we study the impact of fine-tuning pre-trained speech encoders on extracting spoken entities in human-readable form directly from speech without the need for text transcription. We illustrate that such a direct approach optimizes the encoder to transcribe only the entity relevant portions of speech ignoring the superfluous portions such as carrier phrases, or spell name entities. In the context of dialog from an enterprise virtual agent, we demonstrate that the 1-step approach outperforms the typical 2-step approach which first generates lexical transcriptions followed by text-based entity extraction for identifying spoken entities.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="singla-etal-2023-e2e">
    <titleInfo>
        <title>E2E Spoken Entity Extraction for Virtual Agents</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Karan</namePart>
        <namePart type="family">Singla</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Yeon-Jun</namePart>
        <namePart type="family">Kim</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Srinivas</namePart>
        <namePart type="family">Bangalore</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2023-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Industry Track</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Mingxuan</namePart>
            <namePart type="family">Wang</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Imed</namePart>
            <namePart type="family">Zitouni</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Singapore</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>In human-computer conversations, extracting entities such as names, street addresses, and email addresses from speech is a challenging task. In this paper, we study the impact of fine-tuning pre-trained speech encoders on extracting spoken entities in human-readable form directly from speech, without the need for text transcription. We illustrate that such a direct approach optimizes the encoder to transcribe only the entity-relevant portions of speech, ignoring superfluous portions such as carrier phrases or spelled name entities. In the context of dialog from an enterprise virtual agent, we demonstrate that the 1-step approach outperforms the typical 2-step approach, which first generates lexical transcriptions and then performs text-based entity extraction to identify spoken entities.</abstract>
<identifier type="citekey">singla-etal-2023-e2e</identifier>
<identifier type="doi">10.18653/v1/2023.emnlp-industry.54</identifier>
<location>
<url>https://aclanthology.org/2023.emnlp-industry.54</url>
</location>
<part>
<date>2023-12</date>
<extent unit="page">
<start>567</start>
<end>574</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T E2E Spoken Entity Extraction for Virtual Agents
%A Singla, Karan
%A Kim, Yeon-Jun
%A Bangalore, Srinivas
%Y Wang, Mingxuan
%Y Zitouni, Imed
%S Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Industry Track
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F singla-etal-2023-e2e
%X In human-computer conversations, extracting entities such as names, street addresses, and email addresses from speech is a challenging task. In this paper, we study the impact of fine-tuning pre-trained speech encoders on extracting spoken entities in human-readable form directly from speech, without the need for text transcription. We illustrate that such a direct approach optimizes the encoder to transcribe only the entity-relevant portions of speech, ignoring superfluous portions such as carrier phrases or spelled name entities. In the context of dialog from an enterprise virtual agent, we demonstrate that the 1-step approach outperforms the typical 2-step approach, which first generates lexical transcriptions and then performs text-based entity extraction to identify spoken entities.
%R 10.18653/v1/2023.emnlp-industry.54
%U https://aclanthology.org/2023.emnlp-industry.54
%U https://doi.org/10.18653/v1/2023.emnlp-industry.54
%P 567-574
Markdown (Informal)
[E2E Spoken Entity Extraction for Virtual Agents](https://aclanthology.org/2023.emnlp-industry.54) (Singla et al., EMNLP 2023)
ACL
Karan Singla, Yeon-Jun Kim, and Srinivas Bangalore. 2023. E2E Spoken Entity Extraction for Virtual Agents. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Industry Track, pages 567–574, Singapore. Association for Computational Linguistics.