@inproceedings{salton-etal-2017-attentive,
    title = "Attentive Language Models",
    author = "Salton, Giancarlo and
      Ross, Robert and
      Kelleher, John",
    editor = "Kondrak, Greg and
      Watanabe, Taro",
    booktitle = "Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers)",
    month = nov,
    year = "2017",
    address = "Taipei, Taiwan",
    publisher = "Asian Federation of Natural Language Processing",
    url = "https://aclanthology.org/I17-1045",
    pages = "441--450",
    abstract = "In this paper, we extend Recurrent Neural Network Language Models (RNN-LMs) with an attention mechanism. We show that an {``}attentive{''} RNN-LM (with 11M parameters) achieves a better perplexity than larger RNN-LMs (with 66M parameters) and achieves performance comparable to an ensemble of 10 similar sized RNN-LMs. We also show that an {``}attentive{''} RNN-LM needs less contextual information to achieve similar results to the state-of-the-art on the wikitext2 dataset.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="salton-etal-2017-attentive">
    <titleInfo>
      <title>Attentive Language Models</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Giancarlo</namePart>
      <namePart type="family">Salton</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Robert</namePart>
      <namePart type="family">Ross</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">John</namePart>
      <namePart type="family">Kelleher</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2017-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Greg</namePart>
        <namePart type="family">Kondrak</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Taro</namePart>
        <namePart type="family">Watanabe</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Asian Federation of Natural Language Processing</publisher>
        <place>
          <placeTerm type="text">Taipei, Taiwan</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>In this paper, we extend Recurrent Neural Network Language Models (RNN-LMs) with an attention mechanism. We show that an “attentive” RNN-LM (with 11M parameters) achieves a better perplexity than larger RNN-LMs (with 66M parameters) and achieves performance comparable to an ensemble of 10 similar sized RNN-LMs. We also show that an “attentive” RNN-LM needs less contextual information to achieve similar results to the state-of-the-art on the wikitext2 dataset.</abstract>
    <identifier type="citekey">salton-etal-2017-attentive</identifier>
    <location>
      <url>https://aclanthology.org/I17-1045</url>
    </location>
    <part>
      <date>2017-11</date>
      <extent unit="page">
        <start>441</start>
        <end>450</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Attentive Language Models
%A Salton, Giancarlo
%A Ross, Robert
%A Kelleher, John
%Y Kondrak, Greg
%Y Watanabe, Taro
%S Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers)
%D 2017
%8 November
%I Asian Federation of Natural Language Processing
%C Taipei, Taiwan
%F salton-etal-2017-attentive
%X In this paper, we extend Recurrent Neural Network Language Models (RNN-LMs) with an attention mechanism. We show that an “attentive” RNN-LM (with 11M parameters) achieves a better perplexity than larger RNN-LMs (with 66M parameters) and achieves performance comparable to an ensemble of 10 similar sized RNN-LMs. We also show that an “attentive” RNN-LM needs less contextual information to achieve similar results to the state-of-the-art on the wikitext2 dataset.
%U https://aclanthology.org/I17-1045
%P 441-450
Markdown (Informal)
[Attentive Language Models](https://aclanthology.org/I17-1045) (Salton et al., IJCNLP 2017)
ACL
- Giancarlo Salton, Robert Ross, and John Kelleher. 2017. Attentive Language Models. In Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 441–450, Taipei, Taiwan. Asian Federation of Natural Language Processing.
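
For readers who want a concrete picture of what the abstract describes, the sketch below is a minimal, illustrative attention-augmented RNN language model in PyTorch: an LSTM whose state at each step attends over its own previous states before predicting the next token. It is not the authors' exact architecture; the layer sizes, the additive scoring function, and all identifiers are assumptions made purely for illustration.

```python
# Illustrative sketch only (assumed architecture, not the paper's exact model):
# an LSTM language model whose output at each step attends over its own past
# hidden states before predicting the next token.
import torch
import torch.nn as nn
import torch.nn.functional as F


class AttentiveRNNLM(nn.Module):
    def __init__(self, vocab_size, embed_dim=256, hidden_dim=256):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, embed_dim)
        self.lstm = nn.LSTM(embed_dim, hidden_dim, batch_first=True)
        # Additive attention: score each past state against the current state.
        self.attn = nn.Linear(2 * hidden_dim, 1)
        # Predict the next token from the current state plus the attention context.
        self.out = nn.Linear(2 * hidden_dim, vocab_size)

    def forward(self, tokens):
        # tokens: (batch, seq_len) integer ids
        h, _ = self.lstm(self.embed(tokens))            # (batch, seq, hidden)
        batch, seq_len, hidden = h.shape
        logits = []
        for t in range(seq_len):
            query = h[:, t:t + 1, :]                    # current state
            keys = h[:, :t + 1, :]                      # states up to and including t
            scores = self.attn(
                torch.cat([query.expand_as(keys), keys], dim=-1)
            ).squeeze(-1)                               # (batch, t+1)
            weights = F.softmax(scores, dim=-1)
            context = (weights.unsqueeze(-1) * keys).sum(dim=1)  # (batch, hidden)
            logits.append(self.out(torch.cat([h[:, t, :], context], dim=-1)))
        return torch.stack(logits, dim=1)               # (batch, seq, vocab)
```

A forward pass over a batch of token ids returns per-step next-token logits, which can be trained with the usual cross-entropy objective; how closely this matches the paper's attention formulation and parameter budget should be checked against the paper itself.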