@inproceedings{falissard-etal-2023-improving,
title = "Improving generalization in large langue model by learning prefix subspaces",
author = "Falissard, Louis and
Guigue, Vincent and
Soulier, Laure",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.findings-emnlp.768",
doi = "10.18653/v1/2023.findings-emnlp.768",
pages = "11474--11483",
abstract = "This article focuses on large language models (LLMs) fine-tuning in the scarce data regime (also known as {``}few-shot learning setting{''}). We propose a method to increase the generalization capabilities of LLMs based on neural network subspaces. This optimization method, recently introduced in computer vision, aims to improve model generalization by identifying wider local optima through the joint optimization of an entire simplex of models in parameter space. Although this property would be highly beneficial in the context of training large language models in the {``}few-shot learning{''} setting, its adaptation to massive, pretrained transformers poses some challenges. First, their considerable number of parameters make it difficult to train several model jointly, and second, their deterministic parameter initialisation schemes make them unfit to the subspace method as originaly proposed. We show in this paper that its application to {``}Parameter Efficient Fine-Tuning{''} (PEFT) methods, however, is relatively natural, and we propose to apply it to prefix-tuning, by learning entire simplexes of continous prefixes. We test our method on a variant of the GLUE benchmark adapted to the few-shot learning setting, and show that both our contributions (learning prefix simplexes, and non-deterministic validation metric inference) jointly lead to a gain in average performances compared to state of the art methods.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="falissard-etal-2023-improving">
<titleInfo>
<title>Improving generalization in large langue model by learning prefix subspaces</title>
</titleInfo>
<name type="personal">
<namePart type="given">Louis</namePart>
<namePart type="family">Falissard</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vincent</namePart>
<namePart type="family">Guigue</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Laure</namePart>
<namePart type="family">Soulier</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2023</title>
</titleInfo>
<name type="personal">
<namePart type="given">Houda</namePart>
<namePart type="family">Bouamor</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Juan</namePart>
<namePart type="family">Pino</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kalika</namePart>
<namePart type="family">Bali</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Singapore</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This article focuses on the fine-tuning of large language models (LLMs) in the scarce data regime (also known as the “few-shot learning” setting). We propose a method to increase the generalization capabilities of LLMs based on neural network subspaces. This optimization method, recently introduced in computer vision, aims to improve model generalization by identifying wider local optima through the joint optimization of an entire simplex of models in parameter space. Although this property would be highly beneficial in the context of training large language models in the “few-shot learning” setting, its adaptation to massive, pretrained transformers poses some challenges. First, their considerable number of parameters makes it difficult to train several models jointly, and second, their deterministic parameter initialisation schemes make them unfit for the subspace method as originally proposed. We show in this paper that its application to “Parameter Efficient Fine-Tuning” (PEFT) methods is, however, relatively natural, and we propose to apply it to prefix-tuning by learning entire simplexes of continuous prefixes. We test our method on a variant of the GLUE benchmark adapted to the few-shot learning setting, and show that both our contributions (learning prefix simplexes and non-deterministic validation metric inference) jointly lead to a gain in average performance compared to state-of-the-art methods.</abstract>
<identifier type="citekey">falissard-etal-2023-improving</identifier>
<identifier type="doi">10.18653/v1/2023.findings-emnlp.768</identifier>
<location>
<url>https://aclanthology.org/2023.findings-emnlp.768</url>
</location>
<part>
<date>2023-12</date>
<extent unit="page">
<start>11474</start>
<end>11483</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Improving generalization in large langue model by learning prefix subspaces
%A Falissard, Louis
%A Guigue, Vincent
%A Soulier, Laure
%Y Bouamor, Houda
%Y Pino, Juan
%Y Bali, Kalika
%S Findings of the Association for Computational Linguistics: EMNLP 2023
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F falissard-etal-2023-improving
%X This article focuses on the fine-tuning of large language models (LLMs) in the scarce data regime (also known as the “few-shot learning” setting). We propose a method to increase the generalization capabilities of LLMs based on neural network subspaces. This optimization method, recently introduced in computer vision, aims to improve model generalization by identifying wider local optima through the joint optimization of an entire simplex of models in parameter space. Although this property would be highly beneficial in the context of training large language models in the “few-shot learning” setting, its adaptation to massive, pretrained transformers poses some challenges. First, their considerable number of parameters makes it difficult to train several models jointly, and second, their deterministic parameter initialisation schemes make them unfit for the subspace method as originally proposed. We show in this paper that its application to “Parameter Efficient Fine-Tuning” (PEFT) methods is, however, relatively natural, and we propose to apply it to prefix-tuning by learning entire simplexes of continuous prefixes. We test our method on a variant of the GLUE benchmark adapted to the few-shot learning setting, and show that both our contributions (learning prefix simplexes and non-deterministic validation metric inference) jointly lead to a gain in average performance compared to state-of-the-art methods.
%R 10.18653/v1/2023.findings-emnlp.768
%U https://aclanthology.org/2023.findings-emnlp.768
%U https://doi.org/10.18653/v1/2023.findings-emnlp.768
%P 11474-11483
Markdown (Informal)
[Improving generalization in large langue model by learning prefix subspaces](https://aclanthology.org/2023.findings-emnlp.768) (Falissard et al., Findings 2023)
ACL
Louis Falissard, Vincent Guigue, and Laure Soulier. 2023. [Improving generalization in large langue model by learning prefix subspaces](https://aclanthology.org/2023.findings-emnlp.768). In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 11474–11483, Singapore. Association for Computational Linguistics.
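
The abstract describes training an entire simplex of continuous prefixes rather than a single prefix. As a rough illustration only, the sketch below keeps several "vertex" prefixes, samples a random convex combination at each step, and trains the interpolated prefix; the class name, shapes, and the Dirichlet sampling choice are assumptions for illustration, not the authors' actual implementation.

```python
# Hypothetical sketch of the prefix-simplex idea from the abstract.
# The frozen language model and the way the prefix is injected (e.g. as
# past key/values) are assumed and left out; only prefix parameters train.
import torch
import torch.nn as nn


class PrefixSimplex(nn.Module):
    def __init__(self, num_vertices=3, prefix_len=20, hidden_size=768):
        super().__init__()
        # One trainable prefix per simplex vertex.
        self.vertices = nn.Parameter(
            torch.randn(num_vertices, prefix_len, hidden_size) * 0.02
        )

    def sample_prefix(self):
        # Barycentric weights drawn uniformly over the simplex (Dirichlet(1)).
        weights = torch.distributions.Dirichlet(
            torch.ones(self.vertices.shape[0])
        ).sample().to(self.vertices.device)
        # Convex combination of the vertex prefixes: a point in the simplex.
        return torch.einsum("k,kld->ld", weights, self.vertices)


# Usage in a fine-tuning step (LLM frozen, gradients reach every vertex):
simplex = PrefixSimplex()
prefix = simplex.sample_prefix()
# logits = frozen_lm(inputs, prefix=prefix)
# loss = criterion(logits, labels); loss.backward(); optimizer.step()
```

The same sampling routine could be reused at validation time to realise the non-deterministic metric inference mentioned in the abstract, by averaging metrics over several sampled points of the simplex.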