BibTeX
@inproceedings{assylbekov-takhanov-2018-reusing,
title = "Reusing Weights in Subword-Aware Neural Language Models",
author = "Assylbekov, Zhenisbek and
Takhanov, Rustem",
editor = "Walker, Marilyn and
Ji, Heng and
Stent, Amanda",
booktitle = "Proceedings of the 2018 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers)",
month = jun,
year = "2018",
address = "New Orleans, Louisiana",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/N18-1128",
doi = "10.18653/v1/N18-1128",
pages = "1413--1423",
abstract = "We propose several ways of reusing subword embeddings and other weights in subword-aware neural language models. The proposed techniques do not benefit a competitive character-aware model, but some of them improve the performance of syllable- and morpheme-aware models while showing significant reductions in model sizes. We discover a simple hands-on principle: in a multi-layer input embedding model, layers should be tied consecutively bottom-up if reused at output. Our best morpheme-aware model with properly reused weights beats the competitive word-level model by a large margin across multiple languages and has 20{\%}-87{\%} fewer parameters.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="assylbekov-takhanov-2018-reusing">
    <titleInfo>
      <title>Reusing Weights in Subword-Aware Neural Language Models</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Zhenisbek</namePart>
      <namePart type="family">Assylbekov</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Rustem</namePart>
      <namePart type="family">Takhanov</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2018-06</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Marilyn</namePart>
        <namePart type="family">Walker</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Heng</namePart>
        <namePart type="family">Ji</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Amanda</namePart>
        <namePart type="family">Stent</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">New Orleans, Louisiana</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We propose several ways of reusing subword embeddings and other weights in subword-aware neural language models. The proposed techniques do not benefit a competitive character-aware model, but some of them improve the performance of syllable- and morpheme-aware models while showing significant reductions in model sizes. We discover a simple hands-on principle: in a multi-layer input embedding model, layers should be tied consecutively bottom-up if reused at output. Our best morpheme-aware model with properly reused weights beats the competitive word-level model by a large margin across multiple languages and has 20%-87% fewer parameters.</abstract>
    <identifier type="citekey">assylbekov-takhanov-2018-reusing</identifier>
    <identifier type="doi">10.18653/v1/N18-1128</identifier>
    <location>
      <url>https://aclanthology.org/N18-1128</url>
    </location>
    <part>
      <date>2018-06</date>
      <extent unit="page">
        <start>1413</start>
        <end>1423</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Reusing Weights in Subword-Aware Neural Language Models
%A Assylbekov, Zhenisbek
%A Takhanov, Rustem
%Y Walker, Marilyn
%Y Ji, Heng
%Y Stent, Amanda
%S Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers)
%D 2018
%8 June
%I Association for Computational Linguistics
%C New Orleans, Louisiana
%F assylbekov-takhanov-2018-reusing
%X We propose several ways of reusing subword embeddings and other weights in subword-aware neural language models. The proposed techniques do not benefit a competitive character-aware model, but some of them improve the performance of syllable- and morpheme-aware models while showing significant reductions in model sizes. We discover a simple hands-on principle: in a multi-layer input embedding model, layers should be tied consecutively bottom-up if reused at output. Our best morpheme-aware model with properly reused weights beats the competitive word-level model by a large margin across multiple languages and has 20%-87% fewer parameters.
%R 10.18653/v1/N18-1128
%U https://aclanthology.org/N18-1128
%U https://doi.org/10.18653/v1/N18-1128
%P 1413-1423
Markdown (Informal)
[Reusing Weights in Subword-Aware Neural Language Models](https://aclanthology.org/N18-1128) (Assylbekov & Takhanov, NAACL 2018)
ACL
Zhenisbek Assylbekov and Rustem Takhanov. 2018. Reusing Weights in Subword-Aware Neural Language Models. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 1413–1423, New Orleans, Louisiana. Association for Computational Linguistics.
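
The abstract above turns on one idea: weights from the input-embedding side of a language model can be reused (tied) at the output softmax layer. As a minimal sketch of that general technique, and explicitly not the authors' subword-aware models, the PyTorch toy below shares a single token-embedding matrix with the output projection; the class name TiedLM and all sizes are hypothetical.

```python
# Minimal sketch (not the paper's code) of reusing input embeddings at the
# output layer of a neural language model. Assumes a plain token-level LSTM
# LM; the paper studies subword-aware (character/syllable/morpheme) variants.
import torch
import torch.nn as nn

class TiedLM(nn.Module):
    def __init__(self, vocab_size: int, dim: int = 256):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, dim)         # input embedding
        self.rnn = nn.LSTM(dim, dim, batch_first=True)     # context encoder
        self.out = nn.Linear(dim, vocab_size, bias=False)  # output projection
        # Weight reuse: the output projection shares its (vocab_size x dim)
        # matrix with the input embedding, cutting one full embedding-sized
        # block of parameters from the model.
        self.out.weight = self.embed.weight

    def forward(self, tokens: torch.Tensor) -> torch.Tensor:
        h, _ = self.rnn(self.embed(tokens))
        return self.out(h)  # logits over the vocabulary

# Toy usage: batch of 2 sequences of length 5 over a 1000-token vocabulary.
lm = TiedLM(vocab_size=1000)
logits = lm(torch.randint(0, 1000, (2, 5)))
print(logits.shape)  # torch.Size([2, 5, 1000])
```

In the paper's terms this is only the simplest, single-layer case; the abstract's stated principle is that when the input embedding is built from multiple layers, reused layers should be tied consecutively bottom-up.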