@inproceedings{kezar-etal-2025-american,
title = "The {A}merican {S}ign {L}anguage Knowledge Graph: Infusing {ASL} Models with Linguistic Knowledge",
author = "Kezar, Lee and
Munikote, Nidhi and
Zeng, Zian and
Sehyr, Zed and
Caselli, Naomi and
Thomason, Jesse",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2025",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-naacl.389/",
doi = "10.18653/v1/2025.findings-naacl.389",
pages = "7017--7029",
ISBN = "979-8-89176-195-7",
abstract = "Sign language models could make modern language technologies more accessible to those who sign, but the supply of accurately labeled data struggles to meet the demand associated with training large, end-to-end neural models. As an alternative to this approach, we explore how knowledge about the linguistic structure of signs may be used as inductive priors for learning sign recognition and comprehension tasks. We first construct the American Sign Language Knowledge Graph (ASLKG) from 11 sources of linguistic knowledge, with emphasis on features related to signs' phonological and lexical-semantic properties. Then, we use the ASLKG to train neuro-symbolic models on ASL video input tasks, achieving accuracies of 91{\%} for isolated sign recognition, 14{\%} for predicting the semantic features of unseen signs, and 36{\%} for classifying the topic of Youtube-ASL videos."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="kezar-etal-2025-american">
    <titleInfo>
      <title>The American Sign Language Knowledge Graph: Infusing ASL Models with Linguistic Knowledge</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Lee</namePart>
      <namePart type="family">Kezar</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Nidhi</namePart>
      <namePart type="family">Munikote</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Zian</namePart>
      <namePart type="family">Zeng</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Zed</namePart>
      <namePart type="family">Sehyr</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Naomi</namePart>
      <namePart type="family">Caselli</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jesse</namePart>
      <namePart type="family">Thomason</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2025-04</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: NAACL 2025</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Luis</namePart>
        <namePart type="family">Chiruzzo</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Alan</namePart>
        <namePart type="family">Ritter</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Lu</namePart>
        <namePart type="family">Wang</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Albuquerque, New Mexico</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
      <identifier type="isbn">979-8-89176-195-7</identifier>
    </relatedItem>
    <abstract>Sign language models could make modern language technologies more accessible to those who sign, but the supply of accurately labeled data struggles to meet the demand associated with training large, end-to-end neural models. As an alternative to this approach, we explore how knowledge about the linguistic structure of signs may be used as inductive priors for learning sign recognition and comprehension tasks. We first construct the American Sign Language Knowledge Graph (ASLKG) from 11 sources of linguistic knowledge, with emphasis on features related to signs’ phonological and lexical-semantic properties. Then, we use the ASLKG to train neuro-symbolic models on ASL video input tasks, achieving accuracies of 91% for isolated sign recognition, 14% for predicting the semantic features of unseen signs, and 36% for classifying the topic of YouTube-ASL videos.</abstract>
<identifier type="citekey">kezar-etal-2025-american</identifier>
<identifier type="doi">10.18653/v1/2025.findings-naacl.389</identifier>
<location>
<url>https://aclanthology.org/2025.findings-naacl.389/</url>
</location>
<part>
<date>2025-04</date>
<extent unit="page">
<start>7017</start>
<end>7029</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T The American Sign Language Knowledge Graph: Infusing ASL Models with Linguistic Knowledge
%A Kezar, Lee
%A Munikote, Nidhi
%A Zeng, Zian
%A Sehyr, Zed
%A Caselli, Naomi
%A Thomason, Jesse
%Y Chiruzzo, Luis
%Y Ritter, Alan
%Y Wang, Lu
%S Findings of the Association for Computational Linguistics: NAACL 2025
%D 2025
%8 April
%I Association for Computational Linguistics
%C Albuquerque, New Mexico
%@ 979-8-89176-195-7
%F kezar-etal-2025-american
%X Sign language models could make modern language technologies more accessible to those who sign, but the supply of accurately labeled data struggles to meet the demand associated with training large, end-to-end neural models. As an alternative to this approach, we explore how knowledge about the linguistic structure of signs may be used as inductive priors for learning sign recognition and comprehension tasks. We first construct the American Sign Language Knowledge Graph (ASLKG) from 11 sources of linguistic knowledge, with emphasis on features related to signs’ phonological and lexical-semantic properties. Then, we use the ASLKG to train neuro-symbolic models on ASL video input tasks, achieving accuracies of 91% for isolated sign recognition, 14% for predicting the semantic features of unseen signs, and 36% for classifying the topic of YouTube-ASL videos.
%R 10.18653/v1/2025.findings-naacl.389
%U https://aclanthology.org/2025.findings-naacl.389/
%U https://doi.org/10.18653/v1/2025.findings-naacl.389
%P 7017-7029
Markdown (Informal)
[The American Sign Language Knowledge Graph: Infusing ASL Models with Linguistic Knowledge](https://aclanthology.org/2025.findings-naacl.389/) (Kezar et al., Findings 2025)
ACL
Lee Kezar, Nidhi Munikote, Zian Zeng, Zed Sehyr, Naomi Caselli, and Jesse Thomason. 2025. The American Sign Language Knowledge Graph: Infusing ASL Models with Linguistic Knowledge. In Findings of the Association for Computational Linguistics: NAACL 2025, pages 7017–7029, Albuquerque, New Mexico. Association for Computational Linguistics.