@inproceedings{farzana-parde-2024-domain,
    title = "Domain Adaptation via Prompt Learning for {A}lzheimer's Detection",
    author = "Farzana, Shahla and
      Parde, Natalie",
    editor = "Al-Onaizan, Yaser and
      Bansal, Mohit and
      Chen, Yun-Nung",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024",
    month = nov,
    year = "2024",
    address = "Miami, Florida, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.findings-emnlp.937/",
    doi = "10.18653/v1/2024.findings-emnlp.937",
    pages = "15963--15976",
    abstract = "Spoken language presents a compelling medium for non-invasive Alzheimer's disease (AD) screening, and prior work has examined the use of fine-tuned pretrained language models (PLMs) for this purpose. However, PLMs are often optimized on tasks that are inconsistent with AD classification. Spoken language corpora for AD detection are also small and disparate, making generalizability difficult. This paper investigates the use of domain-adaptive prompt fine-tuning for AD detection, using AD classification loss as the training objective and leveraging spoken language corpora from a variety of language tasks. Extensive experiments using voting-based combinations of different prompting paradigms show an impressive mean detection F1=0.8952 (with std=0.01 and best F1=0.9130) for the highest-performing approach when using BERT as the base PLM."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="farzana-parde-2024-domain">
<titleInfo>
<title>Domain Adaptation via Prompt Learning for Alzheimer’s Detection</title>
</titleInfo>
<name type="personal">
<namePart type="given">Shahla</namePart>
<namePart type="family">Farzana</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Natalie</namePart>
<namePart type="family">Parde</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2024</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yaser</namePart>
<namePart type="family">Al-Onaizan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohit</namePart>
<namePart type="family">Bansal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yun-Nung</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Miami, Florida, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Spoken language presents a compelling medium for non-invasive Alzheimer’s disease (AD) screening, and prior work has examined the use of fine-tuned pretrained language models (PLMs) for this purpose. However, PLMs are often optimized on tasks that are inconsistent with AD classification. Spoken language corpora for AD detection are also small and disparate, making generalizability difficult. This paper investigates the use of domain-adaptive prompt fine-tuning for AD detection, using AD classification loss as the training objective and leveraging spoken language corpora from a variety of language tasks. Extensive experiments using voting-based combinations of different prompting paradigms show an impressive mean detection F1=0.8952 (with std=0.01 and best F1=0.9130) for the highest-performing approach when using BERT as the base PLM.</abstract>
<identifier type="citekey">farzana-parde-2024-domain</identifier>
<identifier type="doi">10.18653/v1/2024.findings-emnlp.937</identifier>
<location>
<url>https://aclanthology.org/2024.findings-emnlp.937/</url>
</location>
<part>
<date>2024-11</date>
<extent unit="page">
<start>15963</start>
<end>15976</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Domain Adaptation via Prompt Learning for Alzheimer’s Detection
%A Farzana, Shahla
%A Parde, Natalie
%Y Al-Onaizan, Yaser
%Y Bansal, Mohit
%Y Chen, Yun-Nung
%S Findings of the Association for Computational Linguistics: EMNLP 2024
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, Florida, USA
%F farzana-parde-2024-domain
%X Spoken language presents a compelling medium for non-invasive Alzheimer’s disease (AD) screening, and prior work has examined the use of fine-tuned pretrained language models (PLMs) for this purpose. However, PLMs are often optimized on tasks that are inconsistent with AD classification. Spoken language corpora for AD detection are also small and disparate, making generalizability difficult. This paper investigates the use of domain-adaptive prompt fine-tuning for AD detection, using AD classification loss as the training objective and leveraging spoken language corpora from a variety of language tasks. Extensive experiments using voting-based combinations of different prompting paradigms show an impressive mean detection F1=0.8952 (with std=0.01 and best F1=0.9130) for the highest-performing approach when using BERT as the base PLM.
%R 10.18653/v1/2024.findings-emnlp.937
%U https://aclanthology.org/2024.findings-emnlp.937/
%U https://doi.org/10.18653/v1/2024.findings-emnlp.937
%P 15963-15976
Markdown (Informal)
[Domain Adaptation via Prompt Learning for Alzheimer’s Detection](https://aclanthology.org/2024.findings-emnlp.937/) (Farzana & Parde, Findings 2024)
ACL