@inproceedings{choi-etal-2024-crosslinguistic,
title = "Crosslinguistic Acoustic Feature-based Dementia Classification Using Advanced Learning Architectures",
author = "Choi, Anna Seo Gyeong and
Kim, Jin-seo and
Kim, Seo-hee and
Back, Min Seok and
Cho, Sunghye",
editor = "Kokkinakis, Dimitrios and
Fraser, Kathleen C. and
Themistocleous, Charalambos K. and
Fors, Kristina Lundholm and
Tsanas, Athanasios and
Ohman, Fredrik",
booktitle = "Proceedings of the Fifth Workshop on Resources and ProcessIng of linguistic, para-linguistic and extra-linguistic Data from people with various forms of cognitive/psychiatric/developmental impairments @LREC-COLING 2024",
month = may,
year = "2024",
address = "Torino, Italia",
publisher = "ELRA and ICCL",
url = "https://aclanthology.org/2024.rapid-1.11",
pages = "95--100",
abstract = "In this study, we rigorously evaluated eight machine learning and deep learning classifiers for identifying Alzheimer{'}s Disease (AD) patients using crosslinguistic acoustic features automatically extracted from one-minute oral picture descriptions produced by speakers of American English, Korean, and Mandarin Chinese. We employed eGeMAPSv2 and ComParE feature sets on segmented and non-segmented audio data. The Multilayer Perceptron model showed the highest performance, achieving an accuracy of 83.54{\%} and an AUC of 0.8 on the ComParE features extracted from non-segmented picture description data. Our findings suggest that classifiers trained with acoustic features extracted from one-minute picture description data in multiple languages are highly promising as a quick, language-universal, large-scale, remote screening tool for AD. However, the dataset included predominantly English-speaking participants, indicating the need for more balanced multilingual datasets in future research.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="choi-etal-2024-crosslinguistic">
<titleInfo>
<title>Crosslinguistic Acoustic Feature-based Dementia Classification Using Advanced Learning Architectures</title>
</titleInfo>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="given">Seo</namePart>
<namePart type="given">Gyeong</namePart>
<namePart type="family">Choi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jin-seo</namePart>
<namePart type="family">Kim</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Seo-hee</namePart>
<namePart type="family">Kim</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Min</namePart>
<namePart type="given">Seok</namePart>
<namePart type="family">Back</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sunghye</namePart>
<namePart type="family">Cho</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Fifth Workshop on Resources and ProcessIng of linguistic, para-linguistic and extra-linguistic Data from people with various forms of cognitive/psychiatric/developmental impairments @LREC-COLING 2024</title>
</titleInfo>
<name type="personal">
<namePart type="given">Dimitrios</namePart>
<namePart type="family">Kokkinakis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kathleen</namePart>
<namePart type="given">C</namePart>
<namePart type="family">Fraser</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Charalambos</namePart>
<namePart type="given">K</namePart>
<namePart type="family">Themistocleous</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kristina</namePart>
<namePart type="given">Lundholm</namePart>
<namePart type="family">Fors</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Athanasios</namePart>
<namePart type="family">Tsanas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Fredrik</namePart>
<namePart type="family">Ohman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>ELRA and ICCL</publisher>
<place>
<placeTerm type="text">Torino, Italia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In this study, we rigorously evaluated eight machine learning and deep learning classifiers for identifying Alzheimer’s Disease (AD) patients using crosslinguistic acoustic features automatically extracted from one-minute oral picture descriptions produced by speakers of American English, Korean, and Mandarin Chinese. We employed eGeMAPSv2 and ComParE feature sets on segmented and non-segmented audio data. The Multilayer Perceptron model showed the highest performance, achieving an accuracy of 83.54% and an AUC of 0.8 on the ComParE features extracted from non-segmented picture description data. Our findings suggest that classifiers trained with acoustic features extracted from one-minute picture description data in multiple languages are highly promising as a quick, language-universal, large-scale, remote screening tool for AD. However, the dataset included predominantly English-speaking participants, indicating the need for more balanced multilingual datasets in future research.</abstract>
<identifier type="citekey">choi-etal-2024-crosslinguistic</identifier>
<location>
<url>https://aclanthology.org/2024.rapid-1.11</url>
</location>
<part>
<date>2024-05</date>
<extent unit="page">
<start>95</start>
<end>100</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Crosslinguistic Acoustic Feature-based Dementia Classification Using Advanced Learning Architectures
%A Choi, Anna Seo Gyeong
%A Kim, Jin-seo
%A Kim, Seo-hee
%A Back, Min Seok
%A Cho, Sunghye
%Y Kokkinakis, Dimitrios
%Y Fraser, Kathleen C.
%Y Themistocleous, Charalambos K.
%Y Fors, Kristina Lundholm
%Y Tsanas, Athanasios
%Y Öhman, Fredrik
%S Proceedings of the Fifth Workshop on Resources and ProcessIng of linguistic, para-linguistic and extra-linguistic Data from people with various forms of cognitive/psychiatric/developmental impairments @LREC-COLING 2024
%D 2024
%8 May
%I ELRA and ICCL
%C Torino, Italia
%F choi-etal-2024-crosslinguistic
%X In this study, we rigorously evaluated eight machine learning and deep learning classifiers for identifying Alzheimer’s Disease (AD) patients using crosslinguistic acoustic features automatically extracted from one-minute oral picture descriptions produced by speakers of American English, Korean, and Mandarin Chinese. We employed eGeMAPSv2 and ComParE feature sets on segmented and non-segmented audio data. The Multilayer Perceptron model showed the highest performance, achieving an accuracy of 83.54% and an AUC of 0.8 on the ComParE features extracted from non-segmented picture description data. Our findings suggest that classifiers trained with acoustic features extracted from one-minute picture description data in multiple languages are highly promising as a quick, language-universal, large-scale, remote screening tool for AD. However, the dataset included predominantly English-speaking participants, indicating the need for more balanced multilingual datasets in future research.
%U https://aclanthology.org/2024.rapid-1.11
%P 95-100
Markdown (Informal)
[Crosslinguistic Acoustic Feature-based Dementia Classification Using Advanced Learning Architectures](https://aclanthology.org/2024.rapid-1.11) (Choi et al., RaPID-WS 2024)
ACL
- Anna Seo Gyeong Choi, Jin-seo Kim, Seo-hee Kim, Min Seok Back, and Sunghye Cho. 2024. [Crosslinguistic Acoustic Feature-based Dementia Classification Using Advanced Learning Architectures](https://aclanthology.org/2024.rapid-1.11). In *Proceedings of the Fifth Workshop on Resources and ProcessIng of linguistic, para-linguistic and extra-linguistic Data from people with various forms of cognitive/psychiatric/developmental impairments @LREC-COLING 2024*, pages 95–100, Torino, Italia. ELRA and ICCL.
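
For readers who want a concrete starting point, below is a minimal, hypothetical sketch of the kind of pipeline the abstract describes: utterance-level ComParE (or eGeMAPS) functionals extracted with the `opensmile` Python package, then a scikit-learn Multilayer Perceptron classifier. The file paths, labels, and hyperparameters are illustrative assumptions, not the authors' actual configuration or code.

```python
# Hypothetical sketch, not the paper's implementation: extract openSMILE
# functionals from one-minute picture-description recordings and train an
# MLP to separate AD patients from controls, as the abstract describes.
import opensmile
import pandas as pd
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# Functionals yield one fixed-length feature vector per recording
# (ComParE_2016: 6,373 features; swap in FeatureSet.eGeMAPSv02 for eGeMAPS).
smile = opensmile.Smile(
    feature_set=opensmile.FeatureSet.ComParE_2016,
    feature_level=opensmile.FeatureLevel.Functionals,
)

def train_ad_classifier(wav_paths, labels):
    """Fit an MLP on acoustic functionals.

    wav_paths and labels (1 = AD, 0 = control) are assumed inputs; the
    hidden-layer size and split ratio are illustrative choices.
    """
    X = pd.concat(smile.process_file(p) for p in wav_paths).to_numpy()
    X_tr, X_te, y_tr, y_te = train_test_split(
        X, labels, test_size=0.2, stratify=labels, random_state=0
    )
    clf = make_pipeline(
        StandardScaler(),
        MLPClassifier(hidden_layer_sizes=(128,), max_iter=500, random_state=0),
    )
    clf.fit(X_tr, y_tr)
    acc = accuracy_score(y_te, clf.predict(X_te))
    auc = roc_auc_score(y_te, clf.predict_proba(X_te)[:, 1])
    return clf, acc, auc
```

Because the functionals are language-independent acoustic summaries, the same feature vector layout applies to the English, Korean, and Mandarin recordings, which is what makes a single crosslinguistic classifier of this shape plausible.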