@inproceedings{bashier-etal-2021-disk,
    title = "{DISK}-{CSV}: Distilling Interpretable Semantic Knowledge with a Class Semantic Vector",
    author = "Bashier, Housam Khalifa  and
      Kim, Mi-Young  and
      Goebel, Randy",
    editor = "Merlo, Paola  and
      Tiedemann, J{\"o}rg  and
      Tsarfaty, Reut",
    booktitle = "Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume",
    month = apr,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.eacl-main.263",
    doi = "10.18653/v1/2021.eacl-main.263",
    pages = "3021--3030",
    abstract = "Neural networks (NN) applied to natural language processing (NLP) are becoming deeper and more complex, making them increasingly difficult to understand and interpret. Even in applications of limited scope on fixed data, the creation of these complex {``}black-boxes{''} creates substantial challenges for debugging, understanding, and generalization. But rapid development in this field has now led to building more straightforward and interpretable models. We propose a new technique (DISK-CSV) to distill knowledge concurrently from any neural network architecture for text classification, captured as a lightweight interpretable/explainable classifier. Across multiple datasets, our approach achieves better performance than the target black-box. In addition, our approach provides better explanations than existing techniques.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="bashier-etal-2021-disk">
  <titleInfo>
    <title>DISK-CSV: Distilling Interpretable Semantic Knowledge with a Class Semantic Vector</title>
  </titleInfo>
  <name type="personal">
    <namePart type="given">Housam</namePart>
    <namePart type="given">Khalifa</namePart>
    <namePart type="family">Bashier</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <name type="personal">
    <namePart type="given">Mi-Young</namePart>
    <namePart type="family">Kim</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <name type="personal">
    <namePart type="given">Randy</namePart>
    <namePart type="family">Goebel</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <originInfo>
    <dateIssued>2021-04</dateIssued>
  </originInfo>
  <typeOfResource>text</typeOfResource>
  <relatedItem type="host">
    <titleInfo>
      <title>Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Paola</namePart>
      <namePart type="family">Merlo</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jörg</namePart>
      <namePart type="family">Tiedemann</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Reut</namePart>
      <namePart type="family">Tsarfaty</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <originInfo>
      <publisher>Association for Computational Linguistics</publisher>
      <place>
        <placeTerm type="text">Online</placeTerm>
      </place>
    </originInfo>
    <genre authority="marcgt">conference publication</genre>
  </relatedItem>
  <abstract>Neural networks (NN) applied to natural language processing (NLP) are becoming deeper and more complex, making them increasingly difficult to understand and interpret. Even in applications of limited scope on fixed data, the creation of these complex “black-boxes” creates substantial challenges for debugging, understanding, and generalization. But rapid development in this field has now led to building more straightforward and interpretable models. We propose a new technique (DISK-CSV) to distill knowledge concurrently from any neural network architecture for text classification, captured as a lightweight interpretable/explainable classifier. Across multiple datasets, our approach achieves better performance than the target black-box. In addition, our approach provides better explanations than existing techniques.</abstract>
  <identifier type="citekey">bashier-etal-2021-disk</identifier>
  <identifier type="doi">10.18653/v1/2021.eacl-main.263</identifier>
  <location>
    <url>https://aclanthology.org/2021.eacl-main.263</url>
  </location>
  <part>
    <date>2021-04</date>
    <extent unit="page">
      <start>3021</start>
      <end>3030</end>
    </extent>
  </part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T DISK-CSV: Distilling Interpretable Semantic Knowledge with a Class Semantic Vector
%A Bashier, Housam Khalifa
%A Kim, Mi-Young
%A Goebel, Randy
%Y Merlo, Paola
%Y Tiedemann, Jörg
%Y Tsarfaty, Reut
%S Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume
%D 2021
%8 April
%I Association for Computational Linguistics
%C Online
%F bashier-etal-2021-disk
%X Neural networks (NN) applied to natural language processing (NLP) are becoming deeper and more complex, making them increasingly difficult to understand and interpret. Even in applications of limited scope on fixed data, the creation of these complex “black-boxes” creates substantial challenges for debugging, understanding, and generalization. But rapid development in this field has now led to building more straightforward and interpretable models. We propose a new technique (DISK-CSV) to distill knowledge concurrently from any neural network architecture for text classification, captured as a lightweight interpretable/explainable classifier. Across multiple datasets, our approach achieves better performance than the target black-box. In addition, our approach provides better explanations than existing techniques.
%R 10.18653/v1/2021.eacl-main.263
%U https://aclanthology.org/2021.eacl-main.263
%U https://doi.org/10.18653/v1/2021.eacl-main.263
%P 3021-3030
Markdown (Informal)
[DISK-CSV: Distilling Interpretable Semantic Knowledge with a Class Semantic Vector](https://aclanthology.org/2021.eacl-main.263) (Bashier et al., EACL 2021)
ACL
Housam Khalifa Bashier, Mi-Young Kim, and Randy Goebel. 2021. DISK-CSV: Distilling Interpretable Semantic Knowledge with a Class Semantic Vector. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume, pages 3021–3030, Online. Association for Computational Linguistics.
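For readers skimming this record, the class-semantic-vector idea mentioned in the abstract can be conveyed with a small sketch. The code below is a hypothetical illustration, not the authors' DISK-CSV algorithm: it assumes toy random word embeddings, builds one mean vector per class, predicts by cosine similarity, and reports the input tokens most aligned with the winning class vector as a crude explanation. All names here (`EMB`, `class_vectors`, `predict_and_explain`) are invented for this sketch.

```python
# Hypothetical sketch of text classification with per-class semantic
# vectors. Assumptions (not from the paper): toy random embeddings,
# mean-pooled class vectors, cosine similarity for prediction, and
# per-token similarity scores as a rough explanation.
import numpy as np

rng = np.random.default_rng(0)
VOCAB = ["goal", "match", "team", "stock", "market", "profit"]
EMB = {w: rng.normal(size=8) for w in VOCAB}  # stand-in word embeddings

def doc_vec(tokens):
    # Mean of the embeddings of known tokens (zero vector if none known).
    vs = [EMB[t] for t in tokens if t in EMB]
    return np.mean(vs, axis=0) if vs else np.zeros(8)

def class_vectors(corpus):
    # corpus: list of (tokens, label); returns one mean vector per class.
    by_label = {}
    for tokens, label in corpus:
        by_label.setdefault(label, []).append(doc_vec(tokens))
    return {c: np.mean(vs, axis=0) for c, vs in by_label.items()}

def cosine(a, b):
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12))

def predict_and_explain(tokens, cvecs):
    # Predict the class whose semantic vector is closest to the document,
    # then rank input tokens by alignment with that class vector.
    d = doc_vec(tokens)
    label = max(cvecs, key=lambda c: cosine(d, cvecs[c]))
    scores = {t: cosine(EMB[t], cvecs[label]) for t in tokens if t in EMB}
    return label, sorted(scores, key=scores.get, reverse=True)

corpus = [
    (["goal", "match", "team"], "sports"),
    (["stock", "market", "profit"], "finance"),
]
cvecs = class_vectors(corpus)
print(predict_and_explain(["team", "match"], cvecs))
```

The appeal of this family of approaches, as the abstract suggests, is that the learned class representation is a single inspectable vector per class, so predictions can be traced back to individual input tokens rather than hidden inside a black-box network.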