@inproceedings{peters-martins-2019-ist,
title = "{IT}{--}{IST} at the {SIGMORPHON} 2019 Shared Task: Sparse Two-headed Models for Inflection",
author = "Peters, Ben and
Martins, Andr{\'e} F. T.",
editor = "Nicolai, Garrett and
Cotterell, Ryan",
booktitle = "Proceedings of the 16th Workshop on Computational Research in Phonetics, Phonology, and Morphology",
month = aug,
year = "2019",
address = "Florence, Italy",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W19-4207",
doi = "10.18653/v1/W19-4207",
pages = "50--56",
abstract = "This paper presents the Instituto de Telecomunica{\c{c}}{\~o}es{--}Instituto Superior T{\'e}cnico submission to Task 1 of the SIGMORPHON 2019 Shared Task. Our models combine sparse sequence-to-sequence models with a two-headed attention mechanism that learns separate attention distributions for the lemma and inflectional tags. Among submissions to Task 1, our models rank second and third. Despite the low data setting of the task (only 100 in-language training examples), they learn plausible inflection patterns and often concentrate all probability mass into a small set of hypotheses, making beam search exact.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="peters-martins-2019-ist">
<titleInfo>
<title>IT–IST at the SIGMORPHON 2019 Shared Task: Sparse Two-headed Models for Inflection</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ben</namePart>
<namePart type="family">Peters</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">André</namePart>
<namePart type="given">F</namePart>
<namePart type="given">T</namePart>
<namePart type="family">Martins</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2019-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 16th Workshop on Computational Research in Phonetics, Phonology, and Morphology</title>
</titleInfo>
<name type="personal">
<namePart type="given">Garrett</namePart>
<namePart type="family">Nicolai</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ryan</namePart>
<namePart type="family">Cotterell</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Florence, Italy</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper presents the Instituto de Telecomunicações–Instituto Superior Técnico submission to Task 1 of the SIGMORPHON 2019 Shared Task. Our models combine sparse sequence-to-sequence models with a two-headed attention mechanism that learns separate attention distributions for the lemma and inflectional tags. Among submissions to Task 1, our models rank second and third. Despite the low data setting of the task (only 100 in-language training examples), they learn plausible inflection patterns and often concentrate all probability mass into a small set of hypotheses, making beam search exact.</abstract>
<identifier type="citekey">peters-martins-2019-ist</identifier>
<identifier type="doi">10.18653/v1/W19-4207</identifier>
<location>
<url>https://aclanthology.org/W19-4207</url>
</location>
<part>
<date>2019-08</date>
<extent unit="page">
<start>50</start>
<end>56</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T IT–IST at the SIGMORPHON 2019 Shared Task: Sparse Two-headed Models for Inflection
%A Peters, Ben
%A Martins, André F. T.
%Y Nicolai, Garrett
%Y Cotterell, Ryan
%S Proceedings of the 16th Workshop on Computational Research in Phonetics, Phonology, and Morphology
%D 2019
%8 August
%I Association for Computational Linguistics
%C Florence, Italy
%F peters-martins-2019-ist
%X This paper presents the Instituto de Telecomunicações–Instituto Superior Técnico submission to Task 1 of the SIGMORPHON 2019 Shared Task. Our models combine sparse sequence-to-sequence models with a two-headed attention mechanism that learns separate attention distributions for the lemma and inflectional tags. Among submissions to Task 1, our models rank second and third. Despite the low data setting of the task (only 100 in-language training examples), they learn plausible inflection patterns and often concentrate all probability mass into a small set of hypotheses, making beam search exact.
%R 10.18653/v1/W19-4207
%U https://aclanthology.org/W19-4207
%U https://doi.org/10.18653/v1/W19-4207
%P 50-56
Markdown (Informal)
[IT–IST at the SIGMORPHON 2019 Shared Task: Sparse Two-headed Models for Inflection](https://aclanthology.org/W19-4207) (Peters & Martins, SIGMORPHON 2019)
ACL
Ben Peters and André F. T. Martins. 2019. IT–IST at the SIGMORPHON 2019 Shared Task: Sparse Two-headed Models for Inflection. In Proceedings of the 16th Workshop on Computational Research in Phonetics, Phonology, and Morphology, pages 50–56, Florence, Italy. Association for Computational Linguistics.
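
The "sparse sequence-to-sequence models" mentioned in the abstract refer to replacing softmax with a sparse normalizer that can assign exactly zero probability to most outputs, which is what allows beam search to become exact. As a minimal sketch only (the submission itself may use the more general entmax family, and this is not the authors' code), here is sparsemax (Martins & Astudillo, 2016), the canonical member of that family, in NumPy:

```python
import numpy as np

def sparsemax(z):
    """Euclidean projection of score vector z onto the probability simplex
    (sparsemax; Martins & Astudillo, 2016). Unlike softmax, the result can
    place exactly zero probability on low-scoring entries."""
    z = np.asarray(z, dtype=float)
    z_sorted = np.sort(z)[::-1]              # scores in decreasing order
    cumsum = np.cumsum(z_sorted)
    k = np.arange(1, z.size + 1)
    # support size: largest k with 1 + k * z_sorted[k-1] > sum of the top-k scores
    k_max = k[1 + k * z_sorted > cumsum][-1]
    tau = (cumsum[k_max - 1] - 1.0) / k_max  # threshold subtracted from all scores
    return np.maximum(z - tau, 0.0)

# A peaked score vector yields a sparse distribution: all mass on one entry,
# exact zeros elsewhere (softmax would give small but nonzero probabilities).
print(sparsemax([3.0, 1.0, -1.0, -1.0]))    # -> [1. 0. 0. 0.]
```

If a decoder's output distributions are produced this way, hypotheses with zero probability can never enter the beam, so whenever the number of nonzero-probability continuations fits within the beam width, beam search enumerates every viable hypothesis, which is the sense in which the abstract describes beam search as exact.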