@inproceedings{wang-etal-2021-cs,
title = "{CS}-{BERT}: a pretrained model for customer service dialogues",
author = "Wang, Peiyao and
Fang, Joyce and
Reinspach, Julia",
editor = "Papangelis, Alexandros and
Budzianowski, Pawe{\l} and
Liu, Bing and
Nouri, Elnaz and
Rastogi, Abhinav and
Chen, Yun-Nung",
booktitle = "Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI",
month = nov,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.nlp4convai-1.13",
doi = "10.18653/v1/2021.nlp4convai-1.13",
pages = "130--142",
abstract = "Large-scale pretrained transformer models have demonstrated state-of-the-art (SOTA) performance in a variety of NLP tasks. Nowadays, numerous pretrained models are available in different model flavors and different languages, and can be easily adapted to one{'}s downstream task. However, only a limited number of models are available for dialogue tasks, and in particular, goal-oriented dialogue tasks. In addition, the available pretrained models are trained on general domain language, creating a mismatch between the pretraining language and the downstream domain launguage. In this contribution, we present CS-BERT, a BERT model pretrained on millions of dialogues in the customer service domain. We evaluate CS-BERT on several downstream customer service dialogue tasks, and demonstrate that our in-domain pretraining is advantageous compared to other pretrained models in both zero-shot experiments as well as in finetuning experiments, especially in a low-resource data setting.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="wang-etal-2021-cs">
<titleInfo>
<title>CS-BERT: a pretrained model for customer service dialogues</title>
</titleInfo>
<name type="personal">
<namePart type="given">Peiyao</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Joyce</namePart>
<namePart type="family">Fang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Julia</namePart>
<namePart type="family">Reinspach</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI</title>
</titleInfo>
<name type="personal">
<namePart type="given">Alexandros</namePart>
<namePart type="family">Papangelis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Paweł</namePart>
<namePart type="family">Budzianowski</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bing</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Elnaz</namePart>
<namePart type="family">Nouri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Abhinav</namePart>
<namePart type="family">Rastogi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yun-Nung</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
    <abstract>Large-scale pretrained transformer models have demonstrated state-of-the-art (SOTA) performance in a variety of NLP tasks. Nowadays, numerous pretrained models are available in different model flavors and different languages, and can be easily adapted to one’s downstream task. However, only a limited number of models are available for dialogue tasks, and in particular, goal-oriented dialogue tasks. In addition, the available pretrained models are trained on general domain language, creating a mismatch between the pretraining language and the downstream domain language. In this contribution, we present CS-BERT, a BERT model pretrained on millions of dialogues in the customer service domain. We evaluate CS-BERT on several downstream customer service dialogue tasks, and demonstrate that our in-domain pretraining is advantageous compared to other pretrained models in both zero-shot and finetuning experiments, especially in a low-resource data setting.</abstract>
<identifier type="citekey">wang-etal-2021-cs</identifier>
<identifier type="doi">10.18653/v1/2021.nlp4convai-1.13</identifier>
<location>
<url>https://aclanthology.org/2021.nlp4convai-1.13</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>130</start>
<end>142</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T CS-BERT: a pretrained model for customer service dialogues
%A Wang, Peiyao
%A Fang, Joyce
%A Reinspach, Julia
%Y Papangelis, Alexandros
%Y Budzianowski, Paweł
%Y Liu, Bing
%Y Nouri, Elnaz
%Y Rastogi, Abhinav
%Y Chen, Yun-Nung
%S Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI
%D 2021
%8 November
%I Association for Computational Linguistics
%C Online
%F wang-etal-2021-cs
%X Large-scale pretrained transformer models have demonstrated state-of-the-art (SOTA) performance in a variety of NLP tasks. Nowadays, numerous pretrained models are available in different model flavors and different languages, and can be easily adapted to one’s downstream task. However, only a limited number of models are available for dialogue tasks, and in particular, goal-oriented dialogue tasks. In addition, the available pretrained models are trained on general domain language, creating a mismatch between the pretraining language and the downstream domain language. In this contribution, we present CS-BERT, a BERT model pretrained on millions of dialogues in the customer service domain. We evaluate CS-BERT on several downstream customer service dialogue tasks, and demonstrate that our in-domain pretraining is advantageous compared to other pretrained models in both zero-shot and finetuning experiments, especially in a low-resource data setting.
%R 10.18653/v1/2021.nlp4convai-1.13
%U https://aclanthology.org/2021.nlp4convai-1.13
%U https://doi.org/10.18653/v1/2021.nlp4convai-1.13
%P 130-142
Markdown (Informal)
[CS-BERT: a pretrained model for customer service dialogues](https://aclanthology.org/2021.nlp4convai-1.13) (Wang et al., NLP4ConvAI 2021)
ACL
Peiyao Wang, Joyce Fang, and Julia Reinspach. 2021. [CS-BERT: a pretrained model for customer service dialogues](https://aclanthology.org/2021.nlp4convai-1.13). In *Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI*, pages 130–142, Online. Association for Computational Linguistics.
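
For readers who want to experiment with the recipe the abstract describes, i.e. domain-adaptive masked-language-model pretraining of BERT on customer service dialogues, the sketch below shows one minimal way to do it with Hugging Face transformers and datasets. The tooling, the starting checkpoint, the corpus file name cs_dialogues.txt, and every hyperparameter are illustrative assumptions, not the paper's actual setup.

```python
# Minimal sketch: continue masked-language-model (MLM) pretraining of a
# general-domain BERT on in-domain dialogue text, in the spirit of CS-BERT.
# All choices below (library, checkpoint, file name, hyperparameters) are
# illustrative assumptions, not the paper's actual configuration.
from datasets import load_dataset
from transformers import (
    BertForMaskedLM,
    BertTokenizerFast,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)

# Start from a general-domain checkpoint and continue pretraining in-domain.
tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
model = BertForMaskedLM.from_pretrained("bert-base-uncased")

# Hypothetical corpus: one customer-service dialogue turn per line.
dataset = load_dataset("text", data_files={"train": "cs_dialogues.txt"})

def tokenize(batch):
    return tokenizer(batch["text"], truncation=True, max_length=128)

train_set = dataset["train"].map(tokenize, batched=True, remove_columns=["text"])

# Dynamic masking of 15% of tokens: the standard BERT MLM objective.
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)

trainer = Trainer(
    model=model,
    args=TrainingArguments(
        output_dir="cs-bert-sketch",
        per_device_train_batch_size=32,
        num_train_epochs=1,
        learning_rate=5e-5,
    ),
    train_dataset=train_set,
    data_collator=collator,
)
trainer.train()

# The adapted checkpoint can then be fine-tuned (or used zero-shot) on
# downstream customer-service tasks.
model.save_pretrained("cs-bert-sketch")
tokenizer.save_pretrained("cs-bert-sketch")
```

Whether one warm-starts from a public checkpoint, as above, or pretrains from scratch on the in-domain corpus is a design choice this sketch does not settle; consult the paper itself for the actual training details.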