@inproceedings{woll-etal-2022-segmentation,
    title = "Segmentation of Signs for Research Purposes: Comparing Humans and Machines",
    author = "Woll, Bencie and
      Fox, Neil and
      Cormier, Kearsy",
    editor = "Efthimiou, Eleni and
      Fotinea, Stavroula-Evita and
      Hanke, Thomas and
      Hochgesang, Julie A. and
      Kristoffersen, Jette and
      Mesch, Johanna and
      Schulder, Marc",
    booktitle = "Proceedings of the LREC2022 10th Workshop on the Representation and Processing of Sign Languages: Multilingual Sign Language Resources",
    month = jun,
    year = "2022",
    address = "Marseille, France",
    publisher = "European Language Resources Association",
    url = "https://aclanthology.org/2022.signlang-1.31",
    pages = "198--201",
    abstract = "Sign languages such as British Sign Language (BSL) are visual languages which lack standard writing systems. Annotation of sign language data, especially for the purposes of machine readability, is therefore extremely slow. Tools to help automate and thus speed up the annotation process are very much needed. Here we test the development of one such tool (VIA-SLA), which uses temporal convolutional networks (Renz et al., 2021a, b) for the purpose of segmenting continuous signing in any sign language, and is designed to integrate smoothly with ELAN, the widely used annotation software for analysis of videos of sign language. We compare automatic segmentation by machine with segmentation done by a human, both in terms of time needed and accuracy of segmentation, using samples taken from the BSL Corpus (Schembri et al., 2014). A small sample of four short video files is tested (mean duration 25 seconds). We find that mean accuracy in terms of number and location of segmentations is relatively high, at around 78{\%}. This preliminary test suggests that VIA-SLA promises to be very useful for sign linguists.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="woll-etal-2022-segmentation">
    <titleInfo>
      <title>Segmentation of Signs for Research Purposes: Comparing Humans and Machines</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Bencie</namePart>
      <namePart type="family">Woll</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Neil</namePart>
      <namePart type="family">Fox</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Kearsy</namePart>
      <namePart type="family">Cormier</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2022-06</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the LREC2022 10th Workshop on the Representation and Processing of Sign Languages: Multilingual Sign Language Resources</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Eleni</namePart>
        <namePart type="family">Efthimiou</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Stavroula-Evita</namePart>
        <namePart type="family">Fotinea</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Thomas</namePart>
        <namePart type="family">Hanke</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Julie</namePart>
        <namePart type="given">A</namePart>
        <namePart type="family">Hochgesang</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Jette</namePart>
        <namePart type="family">Kristoffersen</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Johanna</namePart>
        <namePart type="family">Mesch</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Marc</namePart>
        <namePart type="family">Schulder</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>European Language Resources Association</publisher>
        <place>
          <placeTerm type="text">Marseille, France</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Sign languages such as British Sign Language (BSL) are visual languages which lack standard writing systems. Annotation of sign language data, especially for the purposes of machine readability, is therefore extremely slow. Tools to help automate and thus speed up the annotation process are very much needed. Here we test the development of one such tool (VIA-SLA), which uses temporal convolutional networks (Renz et al., 2021a, b) for the purpose of segmenting continuous signing in any sign language, and is designed to integrate smoothly with ELAN, the widely used annotation software for analysis of videos of sign language. We compare automatic segmentation by machine with segmentation done by a human, both in terms of time needed and accuracy of segmentation, using samples taken from the BSL Corpus (Schembri et al., 2014). A small sample of four short video files is tested (mean duration 25 seconds). We find that mean accuracy in terms of number and location of segmentations is relatively high, at around 78%. This preliminary test suggests that VIA-SLA promises to be very useful for sign linguists.</abstract>
    <identifier type="citekey">woll-etal-2022-segmentation</identifier>
    <location>
      <url>https://aclanthology.org/2022.signlang-1.31</url>
    </location>
    <part>
      <date>2022-06</date>
      <extent unit="page">
        <start>198</start>
        <end>201</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Segmentation of Signs for Research Purposes: Comparing Humans and Machines
%A Woll, Bencie
%A Fox, Neil
%A Cormier, Kearsy
%Y Efthimiou, Eleni
%Y Fotinea, Stavroula-Evita
%Y Hanke, Thomas
%Y Hochgesang, Julie A.
%Y Kristoffersen, Jette
%Y Mesch, Johanna
%Y Schulder, Marc
%S Proceedings of the LREC2022 10th Workshop on the Representation and Processing of Sign Languages: Multilingual Sign Language Resources
%D 2022
%8 June
%I European Language Resources Association
%C Marseille, France
%F woll-etal-2022-segmentation
%X Sign languages such as British Sign Language (BSL) are visual languages which lack standard writing systems. Annotation of sign language data, especially for the purposes of machine readability, is therefore extremely slow. Tools to help automate and thus speed up the annotation process are very much needed. Here we test the development of one such tool (VIA-SLA), which uses temporal convolutional networks (Renz et al., 2021a, b) for the purpose of segmenting continuous signing in any sign language, and is designed to integrate smoothly with ELAN, the widely used annotation software for analysis of videos of sign language. We compare automatic segmentation by machine with segmentation done by a human, both in terms of time needed and accuracy of segmentation, using samples taken from the BSL Corpus (Schembri et al., 2014). A small sample of four short video files is tested (mean duration 25 seconds). We find that mean accuracy in terms of number and location of segmentations is relatively high, at around 78%. This preliminary test suggests that VIA-SLA promises to be very useful for sign linguists.
%U https://aclanthology.org/2022.signlang-1.31
%P 198-201
Markdown (Informal)
[Segmentation of Signs for Research Purposes: Comparing Humans and Machines](https://aclanthology.org/2022.signlang-1.31) (Woll et al., SignLang 2022)
ACL
Bencie Woll, Neil Fox, and Kearsy Cormier. 2022. Segmentation of Signs for Research Purposes: Comparing Humans and Machines. In Proceedings of the LREC2022 10th Workshop on the Representation and Processing of Sign Languages: Multilingual Sign Language Resources, pages 198–201, Marseille, France. European Language Resources Association.
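
The abstract reports accuracy "in terms of number and location of segmentations" when comparing human and automatic boundaries. As a minimal sketch of that kind of comparison (not the paper's actual evaluation procedure; the tolerance, matching rule, and boundary times below are illustrative assumptions), one might align the two boundary lists like this in Python:

# Illustrative only: match machine-proposed segment boundaries against
# human-annotated ones. Tolerance, matching rule, and data are assumptions.
def boundary_agreement(human, machine, tolerance=0.1):
    """Fraction of human boundaries (seconds) with a machine boundary
    within `tolerance` seconds; each machine boundary is used at most once."""
    unmatched = sorted(machine)
    hits = 0
    for h in sorted(human):
        # Greedily pair each human boundary with the nearest unused machine boundary.
        best = min(unmatched, key=lambda m: abs(m - h), default=None)
        if best is not None and abs(best - h) <= tolerance:
            hits += 1
            unmatched.remove(best)
    return hits / len(human) if human else 1.0

# Hypothetical boundary times (seconds) for one short clip.
human_boundaries = [0.40, 1.12, 1.90, 2.75, 3.60]
machine_boundaries = [0.38, 1.20, 1.88, 2.60, 3.58, 4.10]
print(f"agreement: {boundary_agreement(human_boundaries, machine_boundaries):.0%}")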