@inproceedings{kodner-khalifa-2022-sigmorphon,
title = "{SIGMORPHON}{--}{U}ni{M}orph 2022 Shared Task 0: Modeling Inflection in Language Acquisition",
author = "Kodner, Jordan and
Khalifa, Salam",
editor = "Nicolai, Garrett and
Chodroff, Eleanor",
booktitle = "Proceedings of the 19th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology",
month = jul,
year = "2022",
address = "Seattle, Washington",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.sigmorphon-1.18/",
doi = "10.18653/v1/2022.sigmorphon-1.18",
pages = "157--175",
abstract = "This year's iteration of the SIGMORPHON{--}UniMorph shared task on {\textquotedblleft}human-like{\textquotedblright} morphological inflection generation focuses on generalization and errors in language acquisition. Systems are trained on data sets extracted from corpora of child-directed speech in order to simulate a natural learning setting, and their predictions are evaluated against what is known about children's developmental trajectories for three well-studied patterns: English past tense, German noun plurals, and Arabic noun plurals. Three submitted neural systems were evaluated together with two baselines. Performance was generally good, and all systems were prone to human-like over-regularization. However, all systems were also prone to non-human-like over-irregularization and nonsense productions to varying degrees. We situate this behavior in a discussion of the Past Tense Debate."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kodner-khalifa-2022-sigmorphon">
<titleInfo>
<title>SIGMORPHON–UniMorph 2022 Shared Task 0: Modeling Inflection in Language Acquisition</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jordan</namePart>
<namePart type="family">Kodner</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Salam</namePart>
<namePart type="family">Khalifa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 19th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology</title>
</titleInfo>
<name type="personal">
<namePart type="given">Garrett</namePart>
<namePart type="family">Nicolai</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Eleanor</namePart>
<namePart type="family">Chodroff</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Seattle, Washington</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This year’s iteration of the SIGMORPHON–UniMorph shared task on “human-like” morphological inflection generation focuses on generalization and errors in language acquisition. Systems are trained on data sets extracted from corpora of child-directed speech in order to simulate a natural learning setting, and their predictions are evaluated against what is known about children’s developmental trajectories for three well-studied patterns: English past tense, German noun plurals, and Arabic noun plurals. Three submitted neural systems were evaluated together with two baselines. Performance was generally good, and all systems were prone to human-like over-regularization. However, all systems were also prone to non-human-like over-irregularization and nonsense productions to varying degrees. We situate this behavior in a discussion of the Past Tense Debate.</abstract>
<identifier type="citekey">kodner-khalifa-2022-sigmorphon</identifier>
<identifier type="doi">10.18653/v1/2022.sigmorphon-1.18</identifier>
<location>
<url>https://aclanthology.org/2022.sigmorphon-1.18/</url>
</location>
<part>
<date>2022-07</date>
<extent unit="page">
<start>157</start>
<end>175</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T SIGMORPHON–UniMorph 2022 Shared Task 0: Modeling Inflection in Language Acquisition
%A Kodner, Jordan
%A Khalifa, Salam
%Y Nicolai, Garrett
%Y Chodroff, Eleanor
%S Proceedings of the 19th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology
%D 2022
%8 July
%I Association for Computational Linguistics
%C Seattle, Washington
%F kodner-khalifa-2022-sigmorphon
%X This year’s iteration of the SIGMORPHON–UniMorph shared task on “human-like” morphological inflection generation focuses on generalization and errors in language acquisition. Systems are trained on data sets extracted from corpora of child-directed speech in order to simulate a natural learning setting, and their predictions are evaluated against what is known about children’s developmental trajectories for three well-studied patterns: English past tense, German noun plurals, and Arabic noun plurals. Three submitted neural systems were evaluated together with two baselines. Performance was generally good, and all systems were prone to human-like over-regularization. However, all systems were also prone to non-human-like over-irregularization and nonsense productions to varying degrees. We situate this behavior in a discussion of the Past Tense Debate.
%R 10.18653/v1/2022.sigmorphon-1.18
%U https://aclanthology.org/2022.sigmorphon-1.18/
%U https://doi.org/10.18653/v1/2022.sigmorphon-1.18
%P 157-175
Markdown (Informal)
[SIGMORPHON–UniMorph 2022 Shared Task 0: Modeling Inflection in Language Acquisition](https://aclanthology.org/2022.sigmorphon-1.18/) (Kodner & Khalifa, SIGMORPHON 2022)
ACL