@inproceedings{torbarina-etal-2021-speeding,
title = "{S}peeding Up {T}ransformer Training By Using Dataset Subsampling - {A}n Exploratory Analysis",
author = "Torbarina, Lovre and
Mihel{\v{c}}i{\'c}, Velimir and
{\v{S}}arlija, Bruno and
Roguski, Lukasz and
Kraljevi{\'c}, {\v{Z}}eljko",
editor = "Moosavi, Nafise Sadat and
Gurevych, Iryna and
Fan, Angela and
Wolf, Thomas and
Hou, Yufang and
Marasovi{\'c}, Ana and
Ravi, Sujith",
booktitle = "Proceedings of the Second Workshop on Simple and Efficient Natural Language Processing",
month = nov,
year = "2021",
address = "Virtual",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.sustainlp-1.11",
doi = "10.18653/v1/2021.sustainlp-1.11",
pages = "86--95",
abstract = "Transformer-based models have greatly advanced the progress in the field of the natural language processing and while they achieve state-of-the-art results on a wide range of tasks, they are cumbersome in parameter size. Subsequently, even when pre-trained transformer models are used for fine-tuning on a given task, if the dataset is large, it may still not be feasible to fine-tune the model within a reasonable time. For this reason, we empirically test 8 subsampling methods for reducing the dataset size on text classification task and report the trade-off between metric score and training time. 7 out of 8 methods are simple methods, while the last one is CRAIG, a method for coreset construction for data-efficient model training. We obtain the best result with the CRAIG method, offering an average decrease of 0.03 points in f-score on test set while speeding up the training time on average by 63.93{\%}, relative to the score and time obtained by using the full dataset. Lastly, we show the trade-off between speed and performance for all sampling methods on three different datasets.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="torbarina-etal-2021-speeding">
    <titleInfo>
      <title>Speeding Up Transformer Training By Using Dataset Subsampling - An Exploratory Analysis</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Lovre</namePart>
      <namePart type="family">Torbarina</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Velimir</namePart>
      <namePart type="family">Mihelčić</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Bruno</namePart>
      <namePart type="family">Šarlija</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Lukasz</namePart>
      <namePart type="family">Roguski</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Željko</namePart>
      <namePart type="family">Kraljević</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Second Workshop on Simple and Efficient Natural Language Processing</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Nafise</namePart>
        <namePart type="given">Sadat</namePart>
        <namePart type="family">Moosavi</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Iryna</namePart>
        <namePart type="family">Gurevych</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Angela</namePart>
        <namePart type="family">Fan</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Thomas</namePart>
        <namePart type="family">Wolf</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yufang</namePart>
        <namePart type="family">Hou</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Ana</namePart>
        <namePart type="family">Marasović</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Sujith</namePart>
        <namePart type="family">Ravi</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Virtual</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Transformer-based models have greatly advanced the field of natural language processing, and while they achieve state-of-the-art results on a wide range of tasks, they are cumbersome in parameter size. Consequently, even when pre-trained transformer models are used for fine-tuning on a given task, if the dataset is large, it may still not be feasible to fine-tune the model within a reasonable time. For this reason, we empirically test 8 subsampling methods for reducing the dataset size on a text classification task and report the trade-off between metric score and training time. 7 of the 8 methods are simple, while the last is CRAIG, a method for coreset construction for data-efficient model training. We obtain the best result with the CRAIG method, which offers an average decrease of 0.03 points in F-score on the test set while speeding up training time on average by 63.93%, relative to the score and time obtained using the full dataset. Lastly, we show the trade-off between speed and performance for all sampling methods on three different datasets.</abstract>
<identifier type="citekey">torbarina-etal-2021-speeding</identifier>
<identifier type="doi">10.18653/v1/2021.sustainlp-1.11</identifier>
<location>
<url>https://aclanthology.org/2021.sustainlp-1.11</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>86</start>
<end>95</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Speeding Up Transformer Training By Using Dataset Subsampling - An Exploratory Analysis
%A Torbarina, Lovre
%A Mihelčić, Velimir
%A Šarlija, Bruno
%A Roguski, Lukasz
%A Kraljević, Željko
%Y Moosavi, Nafise Sadat
%Y Gurevych, Iryna
%Y Fan, Angela
%Y Wolf, Thomas
%Y Hou, Yufang
%Y Marasović, Ana
%Y Ravi, Sujith
%S Proceedings of the Second Workshop on Simple and Efficient Natural Language Processing
%D 2021
%8 November
%I Association for Computational Linguistics
%C Virtual
%F torbarina-etal-2021-speeding
%X Transformer-based models have greatly advanced the field of natural language processing, and while they achieve state-of-the-art results on a wide range of tasks, they are cumbersome in parameter size. Consequently, even when pre-trained transformer models are used for fine-tuning on a given task, if the dataset is large, it may still not be feasible to fine-tune the model within a reasonable time. For this reason, we empirically test 8 subsampling methods for reducing the dataset size on a text classification task and report the trade-off between metric score and training time. 7 of the 8 methods are simple, while the last is CRAIG, a method for coreset construction for data-efficient model training. We obtain the best result with the CRAIG method, which offers an average decrease of 0.03 points in F-score on the test set while speeding up training time on average by 63.93%, relative to the score and time obtained using the full dataset. Lastly, we show the trade-off between speed and performance for all sampling methods on three different datasets.
%R 10.18653/v1/2021.sustainlp-1.11
%U https://aclanthology.org/2021.sustainlp-1.11
%U https://doi.org/10.18653/v1/2021.sustainlp-1.11
%P 86-95
Markdown (Informal)
[Speeding Up Transformer Training By Using Dataset Subsampling - An Exploratory Analysis](https://aclanthology.org/2021.sustainlp-1.11) (Torbarina et al., sustainlp 2021)
ACL
Lovre Torbarina, Velimir Mihelčić, Bruno Šarlija, Lukasz Roguski, and Željko Kraljević. 2021. [Speeding Up Transformer Training By Using Dataset Subsampling - An Exploratory Analysis](https://aclanthology.org/2021.sustainlp-1.11). In *Proceedings of the Second Workshop on Simple and Efficient Natural Language Processing*, pages 86–95, Virtual. Association for Computational Linguistics.