@inproceedings{zhu-2021-mvp,
title = "{MVP}-{BERT}: Multi-Vocab Pre-training for {C}hinese {BERT}",
author = "Zhu, Wei",
editor = "Kabbara, Jad and
Lin, Haitao and
Paullada, Amandalynne and
Vamvas, Jannis",
booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing: Student Research Workshop",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.acl-srw.27",
doi = "10.18653/v1/2021.acl-srw.27",
pages = "260--269",
abstract = "Despite the development of pre-trained language models (PLMs) significantly raise the performances of various Chinese natural language processing (NLP) tasks, the vocabulary (vocab) for these Chinese PLMs remains to be the one provided by Google Chinese BERT (CITATION), which is based on Chinese characters (chars). Second, the masked language model pre-training is based on a single vocab, limiting its downstream task performances. In this work, we first experimentally demonstrate that building a vocab via Chinese word segmentation (CWS) guided sub-word tokenization (SGT) can improve the performances of Chinese PLMs. Then we propose two versions of multi-vocab pre-training (MVP), Hi-MVP and AL-MVP, to improve the models{'} expressiveness. Experiments show that: (a) MVP training strategies improve PLMs{'} downstream performances, especially it can improve the PLM{'}s performances on span-level tasks; (b) our AL-MVP outperforms the recent AMBERT (CITATION) after large-scale pre-training, and it is more robust against adversarial attacks.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhu-2021-mvp">
<titleInfo>
<title>MVP-BERT: Multi-Vocab Pre-training for Chinese BERT</title>
</titleInfo>
<name type="personal">
<namePart type="given">Wei</namePart>
<namePart type="family">Zhu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing: Student Research Workshop</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jad</namePart>
<namePart type="family">Kabbara</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Haitao</namePart>
<namePart type="family">Lin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Amandalynne</namePart>
<namePart type="family">Paullada</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jannis</namePart>
<namePart type="family">Vamvas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Although the development of pre-trained language models (PLMs) has significantly raised the performance of various Chinese natural language processing (NLP) tasks, the vocabulary (vocab) of these Chinese PLMs remains the one provided by Google Chinese BERT (CITATION), which is based on Chinese characters (chars). In addition, masked language model pre-training is based on a single vocab, which limits downstream task performance. In this work, we first demonstrate experimentally that building a vocab via Chinese word segmentation (CWS) guided sub-word tokenization (SGT) can improve the performance of Chinese PLMs. We then propose two versions of multi-vocab pre-training (MVP), Hi-MVP and AL-MVP, to improve the models’ expressiveness. Experiments show that (a) MVP training strategies improve the downstream performance of PLMs, especially on span-level tasks, and (b) our AL-MVP outperforms the recent AMBERT (CITATION) after large-scale pre-training and is more robust against adversarial attacks.</abstract>
<identifier type="citekey">zhu-2021-mvp</identifier>
<identifier type="doi">10.18653/v1/2021.acl-srw.27</identifier>
<location>
<url>https://aclanthology.org/2021.acl-srw.27</url>
</location>
<part>
<date>2021-08</date>
<extent unit="page">
<start>260</start>
<end>269</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T MVP-BERT: Multi-Vocab Pre-training for Chinese BERT
%A Zhu, Wei
%Y Kabbara, Jad
%Y Lin, Haitao
%Y Paullada, Amandalynne
%Y Vamvas, Jannis
%S Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing: Student Research Workshop
%D 2021
%8 August
%I Association for Computational Linguistics
%C Online
%F zhu-2021-mvp
%X Although the development of pre-trained language models (PLMs) has significantly raised the performance of various Chinese natural language processing (NLP) tasks, the vocabulary (vocab) of these Chinese PLMs remains the one provided by Google Chinese BERT (CITATION), which is based on Chinese characters (chars). In addition, masked language model pre-training is based on a single vocab, which limits downstream task performance. In this work, we first demonstrate experimentally that building a vocab via Chinese word segmentation (CWS) guided sub-word tokenization (SGT) can improve the performance of Chinese PLMs. We then propose two versions of multi-vocab pre-training (MVP), Hi-MVP and AL-MVP, to improve the models’ expressiveness. Experiments show that (a) MVP training strategies improve the downstream performance of PLMs, especially on span-level tasks, and (b) our AL-MVP outperforms the recent AMBERT (CITATION) after large-scale pre-training and is more robust against adversarial attacks.
%R 10.18653/v1/2021.acl-srw.27
%U https://aclanthology.org/2021.acl-srw.27
%U https://doi.org/10.18653/v1/2021.acl-srw.27
%P 260-269
Markdown (Informal)
[MVP-BERT: Multi-Vocab Pre-training for Chinese BERT](https://aclanthology.org/2021.acl-srw.27) (Zhu, ACL-IJCNLP 2021)
ACL
Wei Zhu. 2021. MVP-BERT: Multi-Vocab Pre-training for Chinese BERT. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing: Student Research Workshop, pages 260–269, Online. Association for Computational Linguistics.