@inproceedings{ladkat-etal-2022-towards,
title = "Towards Simple and Efficient Task-Adaptive Pre-training for Text Classification",
author = "Ladkat, Arnav and
Miyajiwala, Aamir and
Jagadale, Samiksha and
Kulkarni, Rekha A. and
Joshi, Raviraj",
editor = "He, Yulan and
Ji, Heng and
Li, Sujian and
Liu, Yang and
Chang, Chua-Hui",
booktitle = "Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing (Volume 2: Short Papers)",
month = nov,
year = "2022",
address = "Online only",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.aacl-short.39/",
doi = "10.18653/v1/2022.aacl-short.39",
pages = "320--325",
abstract = "Language models are pre-trained using large corpora of generic data like book corpus, com- mon crawl and Wikipedia, which is essential for the model to understand the linguistic characteristics of the language. New studies suggest using Domain Adaptive Pre-training (DAPT) and Task-Adaptive Pre-training (TAPT) as an intermediate step before the final finetuning task. This step helps cover the target domain vocabulary and improves the model performance on the downstream task. In this work, we study the impact of training only the embedding layer on the model`s performance during TAPT and task-specific finetuning. Based on our study, we propose a simple approach to make the in- termediate step of TAPT for BERT-based mod- els more efficient by performing selective pre-training of BERT layers. We show that training only the BERT embedding layer during TAPT is sufficient to adapt to the vocabulary of the target domain and achieve comparable performance. Our approach is computationally efficient, with 78{\%} fewer parameters trained during TAPT. The proposed embedding layer finetuning approach can also be an efficient domain adaptation technique."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ladkat-etal-2022-towards">
<titleInfo>
<title>Towards Simple and Efficient Task-Adaptive Pre-training for Text Classification</title>
</titleInfo>
<name type="personal">
<namePart type="given">Arnav</namePart>
<namePart type="family">Ladkat</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aamir</namePart>
<namePart type="family">Miyajiwala</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Samiksha</namePart>
<namePart type="family">Jagadale</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rekha</namePart>
<namePart type="given">A</namePart>
<namePart type="family">Kulkarni</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Raviraj</namePart>
<namePart type="family">Joshi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing (Volume 2: Short Papers)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yulan</namePart>
<namePart type="family">He</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Heng</namePart>
<namePart type="family">Ji</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sujian</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yang</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chua-Hui</namePart>
<namePart type="family">Chang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online only</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Language models are pre-trained using large corpora of generic data like book corpus, common crawl and Wikipedia, which is essential for the model to understand the linguistic characteristics of the language. New studies suggest using Domain Adaptive Pre-training (DAPT) and Task-Adaptive Pre-training (TAPT) as an intermediate step before the final finetuning task. This step helps cover the target domain vocabulary and improves the model performance on the downstream task. In this work, we study the impact of training only the embedding layer on the model's performance during TAPT and task-specific finetuning. Based on our study, we propose a simple approach to make the intermediate step of TAPT for BERT-based models more efficient by performing selective pre-training of BERT layers. We show that training only the BERT embedding layer during TAPT is sufficient to adapt to the vocabulary of the target domain and achieve comparable performance. Our approach is computationally efficient, with 78% fewer parameters trained during TAPT. The proposed embedding layer finetuning approach can also be an efficient domain adaptation technique.</abstract>
<identifier type="citekey">ladkat-etal-2022-towards</identifier>
<identifier type="doi">10.18653/v1/2022.aacl-short.39</identifier>
<location>
<url>https://aclanthology.org/2022.aacl-short.39/</url>
</location>
<part>
<date>2022-11</date>
<extent unit="page">
<start>320</start>
<end>325</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Towards Simple and Efficient Task-Adaptive Pre-training for Text Classification
%A Ladkat, Arnav
%A Miyajiwala, Aamir
%A Jagadale, Samiksha
%A Kulkarni, Rekha A.
%A Joshi, Raviraj
%Y He, Yulan
%Y Ji, Heng
%Y Li, Sujian
%Y Liu, Yang
%Y Chang, Chua-Hui
%S Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing (Volume 2: Short Papers)
%D 2022
%8 November
%I Association for Computational Linguistics
%C Online only
%F ladkat-etal-2022-towards
%X Language models are pre-trained using large corpora of generic data like book corpus, common crawl and Wikipedia, which is essential for the model to understand the linguistic characteristics of the language. New studies suggest using Domain Adaptive Pre-training (DAPT) and Task-Adaptive Pre-training (TAPT) as an intermediate step before the final finetuning task. This step helps cover the target domain vocabulary and improves the model performance on the downstream task. In this work, we study the impact of training only the embedding layer on the model's performance during TAPT and task-specific finetuning. Based on our study, we propose a simple approach to make the intermediate step of TAPT for BERT-based models more efficient by performing selective pre-training of BERT layers. We show that training only the BERT embedding layer during TAPT is sufficient to adapt to the vocabulary of the target domain and achieve comparable performance. Our approach is computationally efficient, with 78% fewer parameters trained during TAPT. The proposed embedding layer finetuning approach can also be an efficient domain adaptation technique.
%R 10.18653/v1/2022.aacl-short.39
%U https://aclanthology.org/2022.aacl-short.39/
%U https://doi.org/10.18653/v1/2022.aacl-short.39
%P 320-325
Markdown (Informal)
[Towards Simple and Efficient Task-Adaptive Pre-training for Text Classification](https://aclanthology.org/2022.aacl-short.39/) (Ladkat et al., AACL-IJCNLP 2022)
ACL
- Arnav Ladkat, Aamir Miyajiwala, Samiksha Jagadale, Rekha A. Kulkarni, and Raviraj Joshi. 2022. Towards Simple and Efficient Task-Adaptive Pre-training for Text Classification. In Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing (Volume 2: Short Papers), pages 320–325, Online only. Association for Computational Linguistics.
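A minimal sketch of the idea summarized in the abstract: freeze every BERT parameter except the embedding layer, then run masked-language-model TAPT on unlabeled task-domain text. This is not the authors' released code; it assumes the HuggingFace Transformers and Datasets libraries, and the model name, corpus file, and hyperparameters are placeholders chosen for illustration.

```python
# Illustrative sketch (not the authors' code): embedding-only TAPT for BERT.
# Assumptions: HuggingFace Transformers/Datasets; "task_corpus.txt" is a
# hypothetical file of unlabeled task-domain text; hyperparameters are placeholders.
from transformers import (AutoModelForMaskedLM, AutoTokenizer,
                          DataCollatorForLanguageModeling, Trainer, TrainingArguments)
from datasets import load_dataset

model_name = "bert-base-uncased"  # assumed base model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForMaskedLM.from_pretrained(model_name)

# Freeze everything, then unfreeze only the embedding module (word, position,
# and token-type embeddings plus their LayerNorm), per the selective-TAPT idea.
for param in model.parameters():
    param.requires_grad = False
for param in model.bert.embeddings.parameters():
    param.requires_grad = True

# Unlabeled task-domain text for the MLM objective.
dataset = load_dataset("text", data_files={"train": "task_corpus.txt"})["train"]
dataset = dataset.map(
    lambda batch: tokenizer(batch["text"], truncation=True, max_length=128),
    batched=True, remove_columns=["text"])

collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)
args = TrainingArguments(output_dir="tapt-embeddings-only",
                         num_train_epochs=3,
                         per_device_train_batch_size=16)
Trainer(model=model, args=args, train_dataset=dataset,
        data_collator=collator).train()
```

Because BERT's MLM output layer is tied to the input word embeddings by default, unfreezing the embedding module is enough to let the vocabulary representations adapt while the transformer encoder stays fixed, which is the source of the large reduction in trained parameters that the abstract reports.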