BibTeX
@inproceedings{vongpaseut-etal-2023-evaluating,
title = "Evaluating the Generalization Property of Prefix-based Methods for Data-to-text Generation",
author = "Vongpaseut, Clarine and
Lumbreras, Alberto and
Gartrell, Mike and
Gallinari, Patrick",
editor = "Servan, Christophe and
Vilnat, Anne",
booktitle = "Actes de CORIA-TALN 2023. Actes de la 30e Conf{\'e}rence sur le Traitement Automatique des Langues Naturelles (TALN), volume 2 : travaux de recherche originaux -- articles courts",
month = "6",
year = "2023",
address = "Paris, France",
publisher = "ATALA",
url = "https://aclanthology.org/2023.jeptalnrecital-short.8",
pages = "73--81",
abstract = "Fine-tuning is the prevalent paradigm to adapt pre-trained language models to downstream tasks. Lightweight fine-tuning methods, such as prefix-tuning, only tune a small set of parameters which alleviates cost. Such methods were shown to achieve results similar to fine-tuning; however, performance can decrease when the inputs get farther from the training domain. Moreover, latest works questioned the efficiency of recent lightweight fine-tuning techniques depending on the task and the size of the model. In this paper, we propose to evaluate the generalization property of prefix-based methods depending on the size of the pre-trained language model in the multi-domain setting on data-to-text generation. We found that their performance depends heavily on the size of the model.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="vongpaseut-etal-2023-evaluating">
    <titleInfo>
      <title>Evaluating the Generalization Property of Prefix-based Methods for Data-to-text Generation</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Clarine</namePart>
      <namePart type="family">Vongpaseut</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Alberto</namePart>
      <namePart type="family">Lumbreras</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Mike</namePart>
      <namePart type="family">Gartrell</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Patrick</namePart>
      <namePart type="family">Gallinari</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2023-06</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Actes de CORIA-TALN 2023. Actes de la 30e Conférence sur le Traitement Automatique des Langues Naturelles (TALN), volume 2 : travaux de recherche originaux – articles courts</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Christophe</namePart>
        <namePart type="family">Servan</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Anne</namePart>
        <namePart type="family">Vilnat</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>ATALA</publisher>
        <place>
          <placeTerm type="text">Paris, France</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Fine-tuning is the prevalent paradigm for adapting pre-trained language models to downstream tasks. Lightweight fine-tuning methods, such as prefix-tuning, tune only a small set of parameters, which reduces cost. Such methods have been shown to achieve results similar to full fine-tuning; however, performance can degrade as inputs move farther from the training domain. Moreover, recent work has questioned the efficiency of lightweight fine-tuning techniques depending on the task and the size of the model. In this paper, we evaluate the generalization of prefix-based methods as a function of the size of the pre-trained language model in the multi-domain setting for data-to-text generation. We find that their performance depends heavily on the size of the model.</abstract>
    <identifier type="citekey">vongpaseut-etal-2023-evaluating</identifier>
    <location>
      <url>https://aclanthology.org/2023.jeptalnrecital-short.8</url>
    </location>
    <part>
      <date>2023-06</date>
      <extent unit="page">
        <start>73</start>
        <end>81</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Evaluating the Generalization Property of Prefix-based Methods for Data-to-text Generation
%A Vongpaseut, Clarine
%A Lumbreras, Alberto
%A Gartrell, Mike
%A Gallinari, Patrick
%Y Servan, Christophe
%Y Vilnat, Anne
%S Actes de CORIA-TALN 2023. Actes de la 30e Conférence sur le Traitement Automatique des Langues Naturelles (TALN), volume 2 : travaux de recherche originaux – articles courts
%D 2023
%8 June
%I ATALA
%C Paris, France
%F vongpaseut-etal-2023-evaluating
%X Fine-tuning is the prevalent paradigm for adapting pre-trained language models to downstream tasks. Lightweight fine-tuning methods, such as prefix-tuning, tune only a small set of parameters, which reduces cost. Such methods have been shown to achieve results similar to full fine-tuning; however, performance can degrade as inputs move farther from the training domain. Moreover, recent work has questioned the efficiency of lightweight fine-tuning techniques depending on the task and the size of the model. In this paper, we evaluate the generalization of prefix-based methods as a function of the size of the pre-trained language model in the multi-domain setting for data-to-text generation. We find that their performance depends heavily on the size of the model.
%U https://aclanthology.org/2023.jeptalnrecital-short.8
%P 73-81
Markdown (Informal)
[Evaluating the Generalization Property of Prefix-based Methods for Data-to-text Generation](https://aclanthology.org/2023.jeptalnrecital-short.8) (Vongpaseut et al., JEP/TALN/RECITAL 2023)
ACL
Clarine Vongpaseut, Alberto Lumbreras, Mike Gartrell, and Patrick Gallinari. 2023. Evaluating the Generalization Property of Prefix-based Methods for Data-to-text Generation. In Actes de CORIA-TALN 2023. Actes de la 30e Conférence sur le Traitement Automatique des Langues Naturelles (TALN), volume 2 : travaux de recherche originaux – articles courts, pages 73–81, Paris, France. ATALA.