@inproceedings{zhang-etal-2024-fast,
title = "Fast Adaptation via Prompted Data: An Efficient Cross-Domain Fine-tuning Method for Large Language Models",
author = "Zhang, Yiming and
Yang, Hantao and
Wang, Haobo and
Zhao, Jake",
editor = "Calzolari, Nicoletta and
Kan, Min-Yen and
Hoste, Veronique and
Lenci, Alessandro and
Sakti, Sakriani and
Xue, Nianwen",
booktitle = "Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)",
month = may,
year = "2024",
address = "Torino, Italia",
publisher = "ELRA and ICCL",
url = "https://aclanthology.org/2024.lrec-main.625",
pages = "7117--7132",
abstract = "Large language models (LLMs) have achieved great success in a variety of natural language understanding tasks. However, domain discrepancies between the downstream task and the pre-training corpora may have hurdled LLMs to excel further in the vertical applications. Contrary to prior computational-heavy methods, we propose a lightweight solution to further bridge the gap in applying LLMs to diverse downstream tasks {---} a Fast Adaptation method for LLMs via Prompted Data, in short FAvPD. Notably, with FAvPD, we establish an additional adaptive tuning procedure, wherein we integrate downstream text corpora, gold labels as well as external knowledge sources and then envelop them into a form of highly controllable prompt. As a simple, easy-to-use, and versatile solution, FAvPD lies in the intersection of regimes like knowledge-augmented LLMs, fine-tuning, and adaptation techniques. With extensive experiments, we prove that FAvPD excels in both performance efficacy and training efficiency over related prior works. FAvPD is publicly available at https://github.com/Hyatio/FAvPD.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhang-etal-2024-fast">
<titleInfo>
<title>Fast Adaptation via Prompted Data: An Efficient Cross-Domain Fine-tuning Method for Large Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yiming</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hantao</namePart>
<namePart type="family">Yang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Haobo</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jake</namePart>
<namePart type="family">Zhao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Nicoletta</namePart>
<namePart type="family">Calzolari</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Min-Yen</namePart>
<namePart type="family">Kan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Veronique</namePart>
<namePart type="family">Hoste</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alessandro</namePart>
<namePart type="family">Lenci</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sakriani</namePart>
<namePart type="family">Sakti</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nianwen</namePart>
<namePart type="family">Xue</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>ELRA and ICCL</publisher>
<place>
<placeTerm type="text">Torino, Italia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Large language models (LLMs) have achieved great success in a variety of natural language understanding tasks. However, domain discrepancies between the downstream task and the pre-training corpora may have hurdled LLMs to excel further in the vertical applications. Contrary to prior computational-heavy methods, we propose a lightweight solution to further bridge the gap in applying LLMs to diverse downstream tasks — a Fast Adaptation method for LLMs via Prompted Data, in short FAvPD. Notably, with FAvPD, we establish an additional adaptive tuning procedure, wherein we integrate downstream text corpora, gold labels as well as external knowledge sources and then envelop them into a form of highly controllable prompt. As a simple, easy-to-use, and versatile solution, FAvPD lies in the intersection of regimes like knowledge-augmented LLMs, fine-tuning, and adaptation techniques. With extensive experiments, we prove that FAvPD excels in both performance efficacy and training efficiency over related prior works. FAvPD is publicly available at https://github.com/Hyatio/FAvPD.</abstract>
<identifier type="citekey">zhang-etal-2024-fast</identifier>
<location>
<url>https://aclanthology.org/2024.lrec-main.625</url>
</location>
<part>
<date>2024-05</date>
<extent unit="page">
<start>7117</start>
<end>7132</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Fast Adaptation via Prompted Data: An Efficient Cross-Domain Fine-tuning Method for Large Language Models
%A Zhang, Yiming
%A Yang, Hantao
%A Wang, Haobo
%A Zhao, Jake
%Y Calzolari, Nicoletta
%Y Kan, Min-Yen
%Y Hoste, Veronique
%Y Lenci, Alessandro
%Y Sakti, Sakriani
%Y Xue, Nianwen
%S Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)
%D 2024
%8 May
%I ELRA and ICCL
%C Torino, Italia
%F zhang-etal-2024-fast
%X Large language models (LLMs) have achieved great success in a variety of natural language understanding tasks. However, domain discrepancies between the downstream task and the pre-training corpora may have hurdled LLMs to excel further in the vertical applications. Contrary to prior computational-heavy methods, we propose a lightweight solution to further bridge the gap in applying LLMs to diverse downstream tasks — a Fast Adaptation method for LLMs via Prompted Data, in short FAvPD. Notably, with FAvPD, we establish an additional adaptive tuning procedure, wherein we integrate downstream text corpora, gold labels as well as external knowledge sources and then envelop them into a form of highly controllable prompt. As a simple, easy-to-use, and versatile solution, FAvPD lies in the intersection of regimes like knowledge-augmented LLMs, fine-tuning, and adaptation techniques. With extensive experiments, we prove that FAvPD excels in both performance efficacy and training efficiency over related prior works. FAvPD is publicly available at https://github.com/Hyatio/FAvPD.
%U https://aclanthology.org/2024.lrec-main.625
%P 7117-7132
Markdown (Informal)
[Fast Adaptation via Prompted Data: An Efficient Cross-Domain Fine-tuning Method for Large Language Models](https://aclanthology.org/2024.lrec-main.625) (Zhang et al., LREC-COLING 2024)
ACL
Yiming Zhang, Hantao Yang, Haobo Wang, and Jake Zhao. 2024. Fast Adaptation via Prompted Data: An Efficient Cross-Domain Fine-tuning Method for Large Language Models. In Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 7117–7132, Torino, Italia. ELRA and ICCL.
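For readers skimming the abstract above, here is a minimal illustrative sketch of the "prompted data" idea it describes: wrapping a downstream example, its gold label, and an external knowledge snippet into a single controllable prompt before an adaptive tuning pass. The template, field names, and sample values below are assumptions made purely for illustration and are not the authors' implementation; the official FAvPD code is at https://github.com/Hyatio/FAvPD.

```python
# Illustrative sketch only: a generic way to "envelop" a downstream example,
# its gold label, and external knowledge into one controllable prompt string,
# in the spirit of the prompted-data adaptation described in the abstract.
# Template and field names are assumptions, not the authors' implementation.

from dataclasses import dataclass


@dataclass
class PromptedExample:
    text: str          # downstream task text
    gold_label: str    # gold annotation for the example
    knowledge: str     # external knowledge snippet (e.g., an entity description)


def envelop(example: PromptedExample) -> str:
    """Wrap one labeled example plus external knowledge into a single prompt."""
    return (
        f"Context: {example.knowledge}\n"
        f"Input: {example.text}\n"
        f"Label: {example.gold_label}"
    )


if __name__ == "__main__":
    ex = PromptedExample(
        text="Aspirin reduces the risk of heart attack.",
        gold_label="treatment",
        knowledge="Aspirin is a nonsteroidal anti-inflammatory drug.",
    )
    print(envelop(ex))  # prompted data ready for an adaptive tuning step
```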