@inproceedings{bohnet-etal-2018-morphosyntactic,
    title = "Morphosyntactic Tagging with a Meta-{B}i{LSTM} Model over Context Sensitive Token Encodings",
    author = "Bohnet, Bernd and
      McDonald, Ryan and
      Sim{\~o}es, Gon{\c{c}}alo and
      Andor, Daniel and
      Pitler, Emily and
      Maynez, Joshua",
    editor = "Gurevych, Iryna and
      Miyao, Yusuke",
    booktitle = "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = jul,
    year = "2018",
    address = "Melbourne, Australia",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/P18-1246",
    doi = "10.18653/v1/P18-1246",
    pages = "2642--2652",
    abstract = "The rise of neural networks, and particularly recurrent neural networks, has produced significant advances in part-of-speech tagging accuracy. One characteristic common among these models is the presence of rich initial word encodings. These encodings are typically composed of a recurrent character-based representation combined with dynamically trained and pre-trained word embeddings. However, these encodings do not consider a context wider than a single word, and it is only through subsequent recurrent layers that word or sub-word information interacts. In this paper, we investigate models that use recurrent neural networks with sentence-level context for initial character- and word-based representations. In particular, we show that optimal results are obtained by integrating these context-sensitive representations through synchronized training with a meta-model that learns to combine their states.",
}
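The abstract describes a concrete architecture: a character-level BiLSTM and a word-level BiLSTM each encode the full sentence, and a meta-BiLSTM learns to combine their states into per-token tag predictions. Below is a minimal sketch of that idea in PyTorch; it is not the authors' implementation, and all layer sizes, names, and the last-character pooling scheme are illustrative assumptions (the paper also trains the character, word, and meta models with separate, synchronized losses, which is omitted here).

```python
# Hypothetical sketch of a meta-BiLSTM tagger over sentence-level
# character and word encodings; dimensions and pooling are assumptions.
import torch
import torch.nn as nn

class MetaBiLSTMTagger(nn.Module):
    def __init__(self, n_chars, n_words, n_tags, dim=64):
        super().__init__()
        self.char_emb = nn.Embedding(n_chars, dim)
        self.word_emb = nn.Embedding(n_words, dim)
        # Character BiLSTM runs over the characters of the whole sentence,
        # so each token's character encoding sees context beyond the token.
        self.char_lstm = nn.LSTM(dim, dim, bidirectional=True, batch_first=True)
        self.word_lstm = nn.LSTM(dim, dim, bidirectional=True, batch_first=True)
        # Meta-BiLSTM combines the concatenated character- and word-based states.
        self.meta_lstm = nn.LSTM(4 * dim, dim, bidirectional=True, batch_first=True)
        self.out = nn.Linear(2 * dim, n_tags)

    def forward(self, char_ids, word_ids, token_last_char):
        # char_ids: (batch, n_sentence_chars); word_ids: (batch, n_tokens);
        # token_last_char: (batch, n_tokens) index of each token's final
        # character, used here to pool char states to one vector per token.
        char_states, _ = self.char_lstm(self.char_emb(char_ids))
        idx = token_last_char.unsqueeze(-1).expand(-1, -1, char_states.size(-1))
        char_tok = char_states.gather(1, idx)            # (batch, n_tokens, 2*dim)
        word_tok, _ = self.word_lstm(self.word_emb(word_ids))
        meta_out, _ = self.meta_lstm(torch.cat([char_tok, word_tok], dim=-1))
        return self.out(meta_out)                        # tag scores per token

# Tiny smoke test with random ids: 2 sentences, 30 chars, 6 tokens each.
model = MetaBiLSTMTagger(n_chars=100, n_words=5000, n_tags=17)
chars = torch.randint(0, 100, (2, 30))
words = torch.randint(0, 5000, (2, 6))
last = torch.randint(0, 30, (2, 6))
print(model(chars, words, last).shape)  # torch.Size([2, 6, 17])
```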
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="bohnet-etal-2018-morphosyntactic">
    <titleInfo>
      <title>Morphosyntactic Tagging with a Meta-BiLSTM Model over Context Sensitive Token Encodings</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Bernd</namePart>
      <namePart type="family">Bohnet</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ryan</namePart>
      <namePart type="family">McDonald</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Gonçalo</namePart>
      <namePart type="family">Simões</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Daniel</namePart>
      <namePart type="family">Andor</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Emily</namePart>
      <namePart type="family">Pitler</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Joshua</namePart>
      <namePart type="family">Maynez</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2018-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Iryna</namePart>
        <namePart type="family">Gurevych</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yusuke</namePart>
        <namePart type="family">Miyao</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Melbourne, Australia</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>The rise of neural networks, and particularly recurrent neural networks, has produced significant advances in part-of-speech tagging accuracy. One characteristic common among these models is the presence of rich initial word encodings. These encodings are typically composed of a recurrent character-based representation combined with dynamically trained and pre-trained word embeddings. However, these encodings do not consider a context wider than a single word, and it is only through subsequent recurrent layers that word or sub-word information interacts. In this paper, we investigate models that use recurrent neural networks with sentence-level context for initial character- and word-based representations. In particular, we show that optimal results are obtained by integrating these context-sensitive representations through synchronized training with a meta-model that learns to combine their states.</abstract>
    <identifier type="citekey">bohnet-etal-2018-morphosyntactic</identifier>
    <identifier type="doi">10.18653/v1/P18-1246</identifier>
    <location>
      <url>https://aclanthology.org/P18-1246</url>
    </location>
    <part>
      <date>2018-07</date>
      <extent unit="page">
        <start>2642</start>
        <end>2652</end>
      </extent>
    </part>
  </mods>
</modsCollection>
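The MODS record above can be read with Python's standard library. A small sketch, assuming the record is saved to a file (filename is illustrative); the namespace URI comes straight from the record:

```python
# Parse the MODS citation record with xml.etree; extract title, authors, pages.
import xml.etree.ElementTree as ET

NS = {"m": "http://www.loc.gov/mods/v3"}
tree = ET.parse("bohnet-etal-2018.xml")  # assumed filename for the XML above
mods = tree.getroot().find("m:mods", NS)

title = mods.findtext("m:titleInfo/m:title", namespaces=NS)
# Direct-child <name> elements are the authors; editors sit under <relatedItem>.
authors = [
    " ".join(part.text for part in name.findall("m:namePart", NS))
    for name in mods.findall("m:name", NS)
    if name.findtext("m:role/m:roleTerm", namespaces=NS) == "author"
]
pages = (
    mods.findtext("m:part/m:extent/m:start", namespaces=NS),
    mods.findtext("m:part/m:extent/m:end", namespaces=NS),
)
print(title, authors, pages)
```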
%0 Conference Proceedings
%T Morphosyntactic Tagging with a Meta-BiLSTM Model over Context Sensitive Token Encodings
%A Bohnet, Bernd
%A McDonald, Ryan
%A Simões, Gonçalo
%A Andor, Daniel
%A Pitler, Emily
%A Maynez, Joshua
%Y Gurevych, Iryna
%Y Miyao, Yusuke
%S Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)
%D 2018
%8 July
%I Association for Computational Linguistics
%C Melbourne, Australia
%F bohnet-etal-2018-morphosyntactic
%X The rise of neural networks, and particularly recurrent neural networks, has produced significant advances in part-of-speech tagging accuracy. One characteristic common among these models is the presence of rich initial word encodings. These encodings are typically composed of a recurrent character-based representation combined with dynamically trained and pre-trained word embeddings. However, these encodings do not consider a context wider than a single word, and it is only through subsequent recurrent layers that word or sub-word information interacts. In this paper, we investigate models that use recurrent neural networks with sentence-level context for initial character- and word-based representations. In particular, we show that optimal results are obtained by integrating these context-sensitive representations through synchronized training with a meta-model that learns to combine their states.
%R 10.18653/v1/P18-1246
%U https://aclanthology.org/P18-1246
%U https://doi.org/10.18653/v1/P18-1246
%P 2642-2652
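The %-tagged block above is an EndNote/refer-style export, one field per line (%T title, %A author, %U URL, and so on, repeated tags accumulating). A minimal sketch that parses it into a dict of lists, assuming the record is saved to a file (filename is illustrative):

```python
# Parse a %-tagged (EndNote/refer) record: "%X value" lines become dict entries.
record = {}
with open("bohnet-etal-2018.enw") as f:  # assumed filename for the record above
    for line in f:
        if line.startswith("%") and len(line) > 2:
            tag, _, value = line.rstrip("\n").partition(" ")
            record.setdefault(tag, []).append(value)  # repeated tags accumulate
print(record["%T"][0], record["%A"])
```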
Markdown (Informal)
[Morphosyntactic Tagging with a Meta-BiLSTM Model over Context Sensitive Token Encodings](https://aclanthology.org/P18-1246) (Bohnet et al., ACL 2018)
ACL
Bernd Bohnet, Ryan McDonald, Gonçalo Simões, Daniel Andor, Emily Pitler, and Joshua Maynez. 2018. Morphosyntactic Tagging with a Meta-BiLSTM Model over Context Sensitive Token Encodings. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2642–2652, Melbourne, Australia. Association for Computational Linguistics.