@inproceedings{yu-etal-2018-device,
    title     = {On-Device Neural Language Model Based Word Prediction},
    author    = {Yu, Seunghak and
                 Kulkarni, Nilesh and
                 Lee, Haejun and
                 Kim, Jihie},
    editor    = {Zhao, Dongyan},
    booktitle = {Proceedings of the 27th International Conference on Computational Linguistics: System Demonstrations},
    month     = aug,
    year      = {2018},
    address   = {Santa Fe, New Mexico},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/C18-2028},
    pages     = {128--131},
    abstract  = {Recent developments in deep learning with application to language modeling have led to success in tasks of text processing, summarizing and machine translation. However, deploying huge language models for the mobile device such as on-device keyboards poses computation as a bottle-neck due to their puny computation capacities. In this work, we propose an on-device neural language model based word prediction method that optimizes run-time memory and also provides a real-time prediction environment. Our model size is 7.40MB and has average prediction time of 6.47 ms. Our proposed model outperforms the existing methods for word prediction in terms of keystroke savings and word prediction rate and has been successfully commercialized.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="yu-etal-2018-device">
<titleInfo>
<title>On-Device Neural Language Model Based Word Prediction</title>
</titleInfo>
<name type="personal">
<namePart type="given">Seunghak</namePart>
<namePart type="family">Yu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nilesh</namePart>
<namePart type="family">Kulkarni</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Haejun</namePart>
<namePart type="family">Lee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jihie</namePart>
<namePart type="family">Kim</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 27th International Conference on Computational Linguistics: System Demonstrations</title>
</titleInfo>
<name type="personal">
<namePart type="given">Dongyan</namePart>
<namePart type="family">Zhao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Santa Fe, New Mexico</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Recent developments in deep learning with application to language modeling have led to success in tasks of text processing, summarizing and machine translation. However, deploying huge language models for the mobile device such as on-device keyboards poses computation as a bottle-neck due to their puny computation capacities. In this work, we propose an on-device neural language model based word prediction method that optimizes run-time memory and also provides a real-time prediction environment. Our model size is 7.40MB and has average prediction time of 6.47 ms. Our proposed model outperforms the existing methods for word prediction in terms of keystroke savings and word prediction rate and has been successfully commercialized.</abstract>
<identifier type="citekey">yu-etal-2018-device</identifier>
<location>
<url>https://aclanthology.org/C18-2028</url>
</location>
<part>
<date>2018-08</date>
<extent unit="page">
<start>128</start>
<end>131</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T On-Device Neural Language Model Based Word Prediction
%A Yu, Seunghak
%A Kulkarni, Nilesh
%A Lee, Haejun
%A Kim, Jihie
%Y Zhao, Dongyan
%S Proceedings of the 27th International Conference on Computational Linguistics: System Demonstrations
%D 2018
%8 August
%I Association for Computational Linguistics
%C Santa Fe, New Mexico
%F yu-etal-2018-device
%X Recent developments in deep learning with application to language modeling have led to success in tasks of text processing, summarizing and machine translation. However, deploying huge language models for the mobile device such as on-device keyboards poses computation as a bottle-neck due to their puny computation capacities. In this work, we propose an on-device neural language model based word prediction method that optimizes run-time memory and also provides a real-time prediction environment. Our model size is 7.40MB and has average prediction time of 6.47 ms. Our proposed model outperforms the existing methods for word prediction in terms of keystroke savings and word prediction rate and has been successfully commercialized.
%U https://aclanthology.org/C18-2028
%P 128-131
Markdown (Informal)
[On-Device Neural Language Model Based Word Prediction](https://aclanthology.org/C18-2028) (Yu et al., COLING 2018)
ACL
- Seunghak Yu, Nilesh Kulkarni, Haejun Lee, and Jihie Kim. 2018. On-Device Neural Language Model Based Word Prediction. In Proceedings of the 27th International Conference on Computational Linguistics: System Demonstrations, pages 128–131, Santa Fe, New Mexico. Association for Computational Linguistics.