@inproceedings{prakash-etal-2020-compressing,
title = "Compressing Transformer-Based Semantic Parsing Models using Compositional Code Embeddings",
author = "Prakash, Prafull and
Shashidhar, Saurabh Kumar and
Zhao, Wenlong and
Rongali, Subendhu and
Khan, Haidar and
Kayser, Michael",
editor = "Cohn, Trevor and
He, Yulan and
Liu, Yang",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.findings-emnlp.423",
doi = "10.18653/v1/2020.findings-emnlp.423",
pages = "4711--4717",
abstract = "The current state-of-the-art task-oriented semantic parsing models use BERT or RoBERTa as pretrained encoders; these models have huge memory footprints. This poses a challenge to their deployment for voice assistants such as Amazon Alexa and Google Assistant on edge devices with limited memory budgets. We propose to learn compositional code embeddings to greatly reduce the sizes of BERT-base and RoBERTa-base. We also apply the technique to DistilBERT, ALBERT-base, and ALBERT-large, three already compressed BERT variants which attain similar state-of-the-art performances on semantic parsing with much smaller model sizes. We observe 95.15{\%} 98.46{\%} embedding compression rates and 20.47{\%} 34.22{\%} encoder compression rates, while preserving {\textgreater}97.5{\%} semantic parsing performances. We provide the recipe for training and analyze the trade-off between code embedding sizes and downstream performances.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="prakash-etal-2020-compressing">
<titleInfo>
<title>Compressing Transformer-Based Semantic Parsing Models using Compositional Code Embeddings</title>
</titleInfo>
<name type="personal">
<namePart type="given">Prafull</namePart>
<namePart type="family">Prakash</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Saurabh</namePart>
<namePart type="given">Kumar</namePart>
<namePart type="family">Shashidhar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wenlong</namePart>
<namePart type="family">Zhao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Subendhu</namePart>
<namePart type="family">Rongali</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Haidar</namePart>
<namePart type="family">Khan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Michael</namePart>
<namePart type="family">Kayser</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2020</title>
</titleInfo>
<name type="personal">
<namePart type="given">Trevor</namePart>
<namePart type="family">Cohn</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yulan</namePart>
<namePart type="family">He</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yang</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The current state-of-the-art task-oriented semantic parsing models use BERT or RoBERTa as pretrained encoders; these models have huge memory footprints. This poses a challenge to their deployment for voice assistants such as Amazon Alexa and Google Assistant on edge devices with limited memory budgets. We propose to learn compositional code embeddings to greatly reduce the sizes of BERT-base and RoBERTa-base. We also apply the technique to DistilBERT, ALBERT-base, and ALBERT-large, three already compressed BERT variants which attain similar state-of-the-art performances on semantic parsing with much smaller model sizes. We observe 95.15%–98.46% embedding compression rates and 20.47%–34.22% encoder compression rates, while preserving >97.5% semantic parsing performances. We provide the recipe for training and analyze the trade-off between code embedding sizes and downstream performances.</abstract>
<identifier type="citekey">prakash-etal-2020-compressing</identifier>
<identifier type="doi">10.18653/v1/2020.findings-emnlp.423</identifier>
<location>
<url>https://aclanthology.org/2020.findings-emnlp.423</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>4711</start>
<end>4717</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Compressing Transformer-Based Semantic Parsing Models using Compositional Code Embeddings
%A Prakash, Prafull
%A Shashidhar, Saurabh Kumar
%A Zhao, Wenlong
%A Rongali, Subendhu
%A Khan, Haidar
%A Kayser, Michael
%Y Cohn, Trevor
%Y He, Yulan
%Y Liu, Yang
%S Findings of the Association for Computational Linguistics: EMNLP 2020
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F prakash-etal-2020-compressing
%X The current state-of-the-art task-oriented semantic parsing models use BERT or RoBERTa as pretrained encoders; these models have huge memory footprints. This poses a challenge to their deployment for voice assistants such as Amazon Alexa and Google Assistant on edge devices with limited memory budgets. We propose to learn compositional code embeddings to greatly reduce the sizes of BERT-base and RoBERTa-base. We also apply the technique to DistilBERT, ALBERT-base, and ALBERT-large, three already compressed BERT variants which attain similar state-of-the-art performances on semantic parsing with much smaller model sizes. We observe 95.15%–98.46% embedding compression rates and 20.47%–34.22% encoder compression rates, while preserving >97.5% semantic parsing performances. We provide the recipe for training and analyze the trade-off between code embedding sizes and downstream performances.
%R 10.18653/v1/2020.findings-emnlp.423
%U https://aclanthology.org/2020.findings-emnlp.423
%U https://doi.org/10.18653/v1/2020.findings-emnlp.423
%P 4711-4717
Markdown (Informal)
[Compressing Transformer-Based Semantic Parsing Models using Compositional Code Embeddings](https://aclanthology.org/2020.findings-emnlp.423) (Prakash et al., Findings 2020)
ACL
Prafull Prakash, Saurabh Kumar Shashidhar, Wenlong Zhao, Subendhu Rongali, Haidar Khan, and Michael Kayser. 2020. Compressing Transformer-Based Semantic Parsing Models using Compositional Code Embeddings. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 4711–4717, Online. Association for Computational Linguistics.
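
For a concrete picture of the technique the abstract refers to, below is a minimal, hedged sketch of a compositional code embedding layer in the spirit of compositional code learning (the term traces to Shu and Nakayama, 2018): each token's embedding is composed as a sum of vectors drawn from a few small shared codebooks, so the full vocabulary-by-dimension matrix never has to be stored. This is not the authors' released code; the class name, codebook count/size, and the random code assignments are illustrative assumptions (in the paper, codes are learned so composed embeddings approximate the pretrained encoder's embedding table).

```python
# Hedged sketch, not the authors' code: a compositional code embedding layer.
import torch
import torch.nn as nn


class CompositionalCodeEmbedding(nn.Module):
    """Each token id maps to M discrete codes; its embedding is the sum of the
    selected vectors from M small codebooks with K entries each, replacing the
    full vocab_size x embed_dim matrix."""

    def __init__(self, vocab_size, embed_dim, num_codebooks=8, codebook_size=32):
        super().__init__()
        # Discrete code assignments (one code per codebook per token). Random here
        # for illustration; stored as a buffer since they are integers, not weights.
        self.register_buffer(
            "codes", torch.randint(codebook_size, (vocab_size, num_codebooks))
        )
        # M codebooks of shape (K, embed_dim): the only real float parameters.
        self.codebooks = nn.Parameter(
            0.02 * torch.randn(num_codebooks, codebook_size, embed_dim)
        )

    def forward(self, token_ids):
        codes = self.codes[token_ids]  # (..., M)
        # Look up one vector per codebook and sum them to compose the embedding.
        parts = [
            self.codebooks[m, codes[..., m]] for m in range(self.codebooks.size(0))
        ]
        return torch.stack(parts, dim=-2).sum(dim=-2)  # (..., embed_dim)


# A 30,522-token vocabulary at d=768 needs ~23.4M floats as a dense table, but
# only 8*32*768 = 196,608 codebook floats plus 8 small integer codes per token.
emb = CompositionalCodeEmbedding(vocab_size=30522, embed_dim=768)
print(emb(torch.tensor([[101, 2023, 102]])).shape)  # torch.Size([1, 3, 768])
```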