@inproceedings{prickett-etal-2018-seq2seq,
    title = "{S}eq2{S}eq Models with Dropout can Learn Generalizable Reduplication",
    author = "Prickett, Brandon and
      Traylor, Aaron and
      Pater, Joe",
    editor = "Kuebler, Sandra and
      Nicolai, Garrett",
    booktitle = "Proceedings of the Fifteenth Workshop on Computational Research in Phonetics, Phonology, and Morphology",
    month = oct,
    year = "2018",
    address = "Brussels, Belgium",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W18-5810",
    doi = "10.18653/v1/W18-5810",
    pages = "93--100",
    abstract = "Natural language reduplication can pose a challenge to neural models of language, and has been argued to require variables (Marcus et al., 1999). Sequence-to-sequence neural networks have been shown to perform well at a number of other morphological tasks (Cotterell et al., 2016), and produce results that highly correlate with human behavior (Kirov, 2017; Kirov {\&} Cotterell, 2018) but do not include any explicit variables in their architecture. We find that they can learn a reduplicative pattern that generalizes to novel segments if they are trained with dropout (Srivastava et al., 2014). We argue that this matches the scope of generalization observed in human reduplication.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="prickett-etal-2018-seq2seq">
    <titleInfo>
      <title>Seq2Seq Models with Dropout can Learn Generalizable Reduplication</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Brandon</namePart>
      <namePart type="family">Prickett</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Aaron</namePart>
      <namePart type="family">Traylor</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Joe</namePart>
      <namePart type="family">Pater</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2018-10</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Fifteenth Workshop on Computational Research in Phonetics, Phonology, and Morphology</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Sandra</namePart>
        <namePart type="family">Kuebler</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Garrett</namePart>
        <namePart type="family">Nicolai</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Brussels, Belgium</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Natural language reduplication can pose a challenge to neural models of language, and has been argued to require variables (Marcus et al., 1999). Sequence-to-sequence neural networks have been shown to perform well at a number of other morphological tasks (Cotterell et al., 2016), and produce results that highly correlate with human behavior (Kirov, 2017; Kirov &amp; Cotterell, 2018) but do not include any explicit variables in their architecture. We find that they can learn a reduplicative pattern that generalizes to novel segments if they are trained with dropout (Srivastava et al., 2014). We argue that this matches the scope of generalization observed in human reduplication.</abstract>
    <identifier type="citekey">prickett-etal-2018-seq2seq</identifier>
    <identifier type="doi">10.18653/v1/W18-5810</identifier>
    <location>
      <url>https://aclanthology.org/W18-5810</url>
    </location>
    <part>
      <date>2018-10</date>
      <extent unit="page">
        <start>93</start>
        <end>100</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Seq2Seq Models with Dropout can Learn Generalizable Reduplication
%A Prickett, Brandon
%A Traylor, Aaron
%A Pater, Joe
%Y Kuebler, Sandra
%Y Nicolai, Garrett
%S Proceedings of the Fifteenth Workshop on Computational Research in Phonetics, Phonology, and Morphology
%D 2018
%8 October
%I Association for Computational Linguistics
%C Brussels, Belgium
%F prickett-etal-2018-seq2seq
%X Natural language reduplication can pose a challenge to neural models of language, and has been argued to require variables (Marcus et al., 1999). Sequence-to-sequence neural networks have been shown to perform well at a number of other morphological tasks (Cotterell et al., 2016), and produce results that highly correlate with human behavior (Kirov, 2017; Kirov & Cotterell, 2018) but do not include any explicit variables in their architecture. We find that they can learn a reduplicative pattern that generalizes to novel segments if they are trained with dropout (Srivastava et al., 2014). We argue that this matches the scope of generalization observed in human reduplication.
%R 10.18653/v1/W18-5810
%U https://aclanthology.org/W18-5810
%U https://doi.org/10.18653/v1/W18-5810
%P 93-100
Markdown (Informal)
[Seq2Seq Models with Dropout can Learn Generalizable Reduplication](https://aclanthology.org/W18-5810) (Prickett et al., EMNLP 2018)
ACL
Brandon Prickett, Aaron Traylor, and Joe Pater. 2018. Seq2Seq Models with Dropout can Learn Generalizable Reduplication. In Proceedings of the Fifteenth Workshop on Computational Research in Phonetics, Phonology, and Morphology, pages 93–100, Brussels, Belgium. Association for Computational Linguistics.
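
As a reading aid for the abstract above, here is a minimal, hypothetical sketch of the kind of setup it describes: a sequence-to-sequence network trained with dropout on total reduplication (stem → stem + stem), then probed on a stem containing a segment withheld from training. This is not the authors' code; it assumes PyTorch, and the toy alphabet, model sizes, dropout rate, and training schedule are illustrative choices only.

```python
# Illustrative sketch (not the paper's implementation): a GRU encoder-decoder
# with dropout trained on total reduplication, tested on a novel segment.
import random
import torch
import torch.nn as nn

random.seed(0)
torch.manual_seed(0)

SYMS = list("bdgptk")            # hypothetical training alphabet
NOVEL = "s"                      # segment withheld until test time
VOCAB = SYMS + [NOVEL, "<s>", "</s>"]
IDX = {c: i for i, c in enumerate(VOCAB)}

def encode(tokens):
    """Map a list of symbols to a (1, len) tensor of vocabulary indices."""
    return torch.tensor([[IDX[t] for t in tokens]])

class Seq2Seq(nn.Module):
    def __init__(self, vocab_size, hidden=64, p_drop=0.5):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, hidden)
        self.drop = nn.Dropout(p_drop)   # the dropout the paper argues matters
        self.enc = nn.GRU(hidden, hidden, batch_first=True)
        self.dec = nn.GRU(hidden, hidden, batch_first=True)
        self.out = nn.Linear(hidden, vocab_size)

    def forward(self, src, tgt_in):
        _, h = self.enc(self.drop(self.emb(src)))          # encode the stem
        dec_out, _ = self.dec(self.drop(self.emb(tgt_in)), h)
        return self.out(dec_out)                           # per-step logits

model = Seq2Seq(len(VOCAB))
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
loss_fn = nn.CrossEntropyLoss()

# Train on total reduplication: stem -> stem + stem, never using NOVEL.
model.train()
for step in range(3000):
    stem = random.choices(SYMS, k=random.randint(2, 4))
    redup = stem + stem
    logits = model(encode(stem), encode(["<s>"] + redup))
    loss = loss_fn(logits.squeeze(0), encode(redup + ["</s>"]).squeeze(0))
    opt.zero_grad()
    loss.backward()
    opt.step()

# Test: greedy decoding on a stem containing the never-seen segment.
model.eval()
with torch.no_grad():
    stem = ["b", NOVEL]
    _, h = model.enc(model.emb(encode(stem)))
    tok, produced = IDX["<s>"], []
    for _ in range(12):
        o, h = model.dec(model.emb(torch.tensor([[tok]])), h)
        tok = model.out(o)[0, -1].argmax().item()
        if VOCAB[tok] == "</s>":
            break
        produced.append(VOCAB[tok])
    print("".join(stem), "->", "".join(produced))  # hoped-for output: "bsbs"
```

Whether the trained network actually copies the novel segment depends on how segments are represented and on the dropout rate, which is the variable the paper manipulates; the sketch shows only the shape of the training and test procedure, not a replication of the result.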