@inproceedings{gee-etal-2023-compressed,
    title = "Are Compressed Language Models Less Subgroup Robust?",
    author = "Gee, Leonidas and
      Zugarini, Andrea and
      Quadrianto, Novi",
    editor = "Bouamor, Houda and
      Pino, Juan and
      Bali, Kalika",
    booktitle = "Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing",
    month = dec,
    year = "2023",
    address = "Singapore",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.emnlp-main.983",
    doi = "10.18653/v1/2023.emnlp-main.983",
    pages = "15859--15868",
    abstract = "To reduce the inference cost of large language models, model compression is increasingly used to create smaller scalable models. However, little is known about their robustness to minority subgroups defined by the labels and attributes of a dataset. In this paper, we investigate the effects of 18 different compression methods and settings on the subgroup robustness of BERT language models. We show that worst-group performance does not depend on model size alone, but also on the compression method used. Additionally, we find that model compression does not always worsen the performance on minority subgroups. Altogether, our analysis serves to further research into the subgroup robustness of model compression.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="gee-etal-2023-compressed">
    <titleInfo>
        <title>Are Compressed Language Models Less Subgroup Robust?</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Leonidas</namePart>
        <namePart type="family">Gee</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Andrea</namePart>
        <namePart type="family">Zugarini</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Novi</namePart>
        <namePart type="family">Quadrianto</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2023-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Houda</namePart>
            <namePart type="family">Bouamor</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Juan</namePart>
            <namePart type="family">Pino</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Kalika</namePart>
            <namePart type="family">Bali</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Singapore</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>To reduce the inference cost of large language models, model compression is increasingly used to create smaller scalable models. However, little is known about their robustness to minority subgroups defined by the labels and attributes of a dataset. In this paper, we investigate the effects of 18 different compression methods and settings on the subgroup robustness of BERT language models. We show that worst-group performance does not depend on model size alone, but also on the compression method used. Additionally, we find that model compression does not always worsen the performance on minority subgroups. Altogether, our analysis serves to further research into the subgroup robustness of model compression.</abstract>
    <identifier type="citekey">gee-etal-2023-compressed</identifier>
    <identifier type="doi">10.18653/v1/2023.emnlp-main.983</identifier>
    <location>
        <url>https://aclanthology.org/2023.emnlp-main.983</url>
    </location>
    <part>
        <date>2023-12</date>
        <extent unit="page">
            <start>15859</start>
            <end>15868</end>
        </extent>
    </part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Are Compressed Language Models Less Subgroup Robust?
%A Gee, Leonidas
%A Zugarini, Andrea
%A Quadrianto, Novi
%Y Bouamor, Houda
%Y Pino, Juan
%Y Bali, Kalika
%S Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F gee-etal-2023-compressed
%X To reduce the inference cost of large language models, model compression is increasingly used to create smaller scalable models. However, little is known about their robustness to minority subgroups defined by the labels and attributes of a dataset. In this paper, we investigate the effects of 18 different compression methods and settings on the subgroup robustness of BERT language models. We show that worst-group performance does not depend on model size alone, but also on the compression method used. Additionally, we find that model compression does not always worsen the performance on minority subgroups. Altogether, our analysis serves to further research into the subgroup robustness of model compression.
%R 10.18653/v1/2023.emnlp-main.983
%U https://aclanthology.org/2023.emnlp-main.983
%U https://doi.org/10.18653/v1/2023.emnlp-main.983
%P 15859-15868
Markdown (Informal)
[Are Compressed Language Models Less Subgroup Robust?](https://aclanthology.org/2023.emnlp-main.983) (Gee et al., EMNLP 2023)
ACL
Leonidas Gee, Andrea Zugarini, and Novi Quadrianto. 2023. Are Compressed Language Models Less Subgroup Robust?. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 15859–15868, Singapore. Association for Computational Linguistics.