@inproceedings{hahn-choi-2019-self,
title = "Self-Knowledge Distillation in Natural Language Processing",
author = "Hahn, Sangchul and
Choi, Heeyoul",
editor = "Mitkov, Ruslan and
Angelova, Galia",
booktitle = "Proceedings of the International Conference on Recent Advances in Natural Language Processing (RANLP 2019)",
month = sep,
year = "2019",
address = "Varna, Bulgaria",
publisher = "INCOMA Ltd.",
url = "https://aclanthology.org/R19-1050",
doi = "10.26615/978-954-452-056-4_050",
pages = "423--430",
abstract = "Since deep learning became a key player in natural language processing (NLP), many deep learning models have been showing remarkable performances in a variety of NLP tasks. Such high performance can be explained by efficient knowledge representation of deep learning models. Knowledge distillation from pretrained deep networks suggests that we can use more information from the soft target probability to train other neural networks. In this paper, we propose a self-knowledge distillation method, based on the soft target probabilities of the training model itself, where multimode information is distilled from the word embedding space right below the softmax layer. Due to the time complexity, our method approximates the soft target probabilities. In experiments, we applied the proposed method to two different and fundamental NLP tasks: language model and neural machine translation. The experiment results show that our proposed method improves performance on the tasks.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="hahn-choi-2019-self">
    <titleInfo>
        <title>Self-Knowledge Distillation in Natural Language Processing</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Sangchul</namePart>
        <namePart type="family">Hahn</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Heeyoul</namePart>
        <namePart type="family">Choi</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2019-09</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the International Conference on Recent Advances in Natural Language Processing (RANLP 2019)</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Ruslan</namePart>
            <namePart type="family">Mitkov</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Galia</namePart>
            <namePart type="family">Angelova</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>INCOMA Ltd.</publisher>
            <place>
                <placeTerm type="text">Varna, Bulgaria</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Since deep learning became a key player in natural language processing (NLP), many deep learning models have shown remarkable performance on a variety of NLP tasks. Such high performance can be explained by the efficient knowledge representation of deep learning models. Knowledge distillation from pretrained deep networks suggests that we can use more information from the soft target probabilities to train other neural networks. In this paper, we propose a self-knowledge distillation method based on the soft target probabilities of the training model itself, where multimode information is distilled from the word embedding space right below the softmax layer. Due to the time complexity, our method approximates the soft target probabilities. In experiments, we apply the proposed method to two different and fundamental NLP tasks: language modeling and neural machine translation. The experimental results show that the proposed method improves performance on both tasks.</abstract>
<identifier type="citekey">hahn-choi-2019-self</identifier>
<identifier type="doi">10.26615/978-954-452-056-4_050</identifier>
<location>
<url>https://aclanthology.org/R19-1050</url>
</location>
<part>
<date>2019-09</date>
<extent unit="page">
<start>423</start>
<end>430</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Self-Knowledge Distillation in Natural Language Processing
%A Hahn, Sangchul
%A Choi, Heeyoul
%Y Mitkov, Ruslan
%Y Angelova, Galia
%S Proceedings of the International Conference on Recent Advances in Natural Language Processing (RANLP 2019)
%D 2019
%8 September
%I INCOMA Ltd.
%C Varna, Bulgaria
%F hahn-choi-2019-self
%X Since deep learning became a key player in natural language processing (NLP), many deep learning models have shown remarkable performance on a variety of NLP tasks. Such high performance can be explained by the efficient knowledge representation of deep learning models. Knowledge distillation from pretrained deep networks suggests that we can use more information from the soft target probabilities to train other neural networks. In this paper, we propose a self-knowledge distillation method based on the soft target probabilities of the training model itself, where multimode information is distilled from the word embedding space right below the softmax layer. Due to the time complexity, our method approximates the soft target probabilities. In experiments, we apply the proposed method to two different and fundamental NLP tasks: language modeling and neural machine translation. The experimental results show that the proposed method improves performance on both tasks.
%R 10.26615/978-954-452-056-4_050
%U https://aclanthology.org/R19-1050
%U https://doi.org/10.26615/978-954-452-056-4_050
%P 423-430
Markdown (Informal)
[Self-Knowledge Distillation in Natural Language Processing](https://aclanthology.org/R19-1050) (Hahn & Choi, RANLP 2019)
ACL
Sangchul Hahn and Heeyoul Choi. 2019. Self-Knowledge Distillation in Natural Language Processing. In Proceedings of the International Conference on Recent Advances in Natural Language Processing (RANLP 2019), pages 423–430, Varna, Bulgaria. INCOMA Ltd.
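
The abstract describes the method only at a high level. As an illustration of the general recipe, and not the authors' exact formulation, the following is a minimal PyTorch-style sketch in which the soft targets are built from cosine similarities among the (detached) output word embeddings and restricted to the top-k most similar words, reflecting the abstract's note that the soft target probabilities are approximated for efficiency. All names and values (`self_distillation_loss`, `output_embeddings`, `alpha`, `temperature`, `top_k`) are hypothetical.

```python
# Illustrative sketch only; not the paper's exact formulation of
# self-knowledge distillation, just the general soft-target recipe.
import torch
import torch.nn.functional as F

def self_distillation_loss(logits, targets, output_embeddings,
                           alpha=0.7, temperature=2.0, top_k=10):
    """Cross-entropy on the gold word plus a distillation term toward
    soft targets derived from the word embedding space.

    logits:            (batch, vocab) pre-softmax scores from the model
    targets:           (batch,) gold word indices
    output_embeddings: (vocab, dim) output word embedding matrix
    """
    # Hard-label term: standard cross-entropy against the gold word.
    ce = F.cross_entropy(logits, targets)

    # Soft targets: spread probability mass over the top_k words whose
    # (detached) embeddings are most similar to the gold word's embedding.
    # Keeping only top_k words is the cheap approximation motivated by the
    # cost of scoring the full vocabulary.
    with torch.no_grad():
        emb = F.normalize(output_embeddings.detach(), dim=-1)  # (V, d)
        sims = emb[targets] @ emb.t()                          # (B, V) cosine similarity
        topk_sims, topk_idx = sims.topk(top_k, dim=-1)         # (B, k)
        soft = F.softmax(topk_sims / temperature, dim=-1)      # (B, k)

    # Distillation term: divergence between the soft targets and the
    # student's log-probabilities, restricted to the top_k candidate words.
    log_student = F.log_softmax(logits, dim=-1).gather(1, topk_idx)  # (B, k)
    kd = (soft * (soft.log() - log_student)).sum(dim=-1).mean()

    # Blend the hard-label and soft-target objectives.
    return alpha * ce + (1.0 - alpha) * kd
```

In a language-modeling setup, `logits` would be the pre-softmax scores for the next word, `targets` the gold next-word indices, and `output_embeddings` the weight matrix of the final projection layer; the values of `alpha`, `temperature`, and `top_k` would need tuning for any real use.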