@inproceedings{bashier-etal-2020-rancc,
title = "{RANCC}: Rationalizing Neural Networks via Concept Clustering",
author = "Bashier, Housam Khalifa and
Kim, Mi-Young and
Goebel, Randy",
editor = "Scott, Donia and
Bel, Nuria and
Zong, Chengqing",
booktitle = "Proceedings of the 28th International Conference on Computational Linguistics",
month = dec,
year = "2020",
address = "Barcelona, Spain (Online)",
publisher = "International Committee on Computational Linguistics",
url = "https://aclanthology.org/2020.coling-main.286",
doi = "10.18653/v1/2020.coling-main.286",
pages = "3214--3224",
abstract = "We propose a new self-explainable model for Natural Language Processing (NLP) text classification tasks. Our approach constructs explanations concurrently with the formulation of classification predictions. To do so, we extract a rationale from the text, then use it to predict a concept of interest as the final prediction. We provide three types of explanations: 1) rationale extraction, 2) a measure of feature importance, and 3) clustering of concepts. In addition, we show how our model can be compressed without applying complicated compression techniques. We experimentally demonstrate our explainability approach on a number of well-known text classification datasets.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="bashier-etal-2020-rancc">
    <titleInfo>
      <title>RANCC: Rationalizing Neural Networks via Concept Clustering</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Housam</namePart>
      <namePart type="given">Khalifa</namePart>
      <namePart type="family">Bashier</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Mi-Young</namePart>
      <namePart type="family">Kim</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Randy</namePart>
      <namePart type="family">Goebel</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 28th International Conference on Computational Linguistics</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Donia</namePart>
        <namePart type="family">Scott</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Nuria</namePart>
        <namePart type="family">Bel</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Chengqing</namePart>
        <namePart type="family">Zong</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>International Committee on Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Barcelona, Spain (Online)</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We propose a new self-explainable model for Natural Language Processing (NLP) text classification tasks. Our approach constructs explanations concurrently with the formulation of classification predictions. To do so, we extract a rationale from the text, then use it to predict a concept of interest as the final prediction. We provide three types of explanations: 1) rationale extraction, 2) a measure of feature importance, and 3) clustering of concepts. In addition, we show how our model can be compressed without applying complicated compression techniques. We experimentally demonstrate our explainability approach on a number of well-known text classification datasets.</abstract>
    <identifier type="citekey">bashier-etal-2020-rancc</identifier>
    <identifier type="doi">10.18653/v1/2020.coling-main.286</identifier>
    <location>
      <url>https://aclanthology.org/2020.coling-main.286</url>
    </location>
    <part>
      <date>2020-12</date>
      <extent unit="page">
        <start>3214</start>
        <end>3224</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T RANCC: Rationalizing Neural Networks via Concept Clustering
%A Bashier, Housam Khalifa
%A Kim, Mi-Young
%A Goebel, Randy
%Y Scott, Donia
%Y Bel, Nuria
%Y Zong, Chengqing
%S Proceedings of the 28th International Conference on Computational Linguistics
%D 2020
%8 December
%I International Committee on Computational Linguistics
%C Barcelona, Spain (Online)
%F bashier-etal-2020-rancc
%X We propose a new self-explainable model for Natural Language Processing (NLP) text classification tasks. Our approach constructs explanations concurrently with the formulation of classification predictions. To do so, we extract a rationale from the text, then use it to predict a concept of interest as the final prediction. We provide three types of explanations: 1) rationale extraction, 2) a measure of feature importance, and 3) clustering of concepts. In addition, we show how our model can be compressed without applying complicated compression techniques. We experimentally demonstrate our explainability approach on a number of well-known text classification datasets.
%R 10.18653/v1/2020.coling-main.286
%U https://aclanthology.org/2020.coling-main.286
%U https://doi.org/10.18653/v1/2020.coling-main.286
%P 3214-3224
Markdown (Informal)
[RANCC: Rationalizing Neural Networks via Concept Clustering](https://aclanthology.org/2020.coling-main.286) (Bashier et al., COLING 2020)
ACL
Housam Khalifa Bashier, Mi-Young Kim, and Randy Goebel. 2020. RANCC: Rationalizing Neural Networks via Concept Clustering. In Proceedings of the 28th International Conference on Computational Linguistics, pages 3214–3224, Barcelona, Spain (Online). International Committee on Computational Linguistics.
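
The abstract above describes a select-then-predict design: extract a rationale from the input text, then make the final prediction from that rationale alone. As a rough illustration of that generic idea only, here is a minimal PyTorch sketch; it is not the paper's RANCC model, and every name, dimension, and hyperparameter in it (RationaleClassifier, emb_dim, top_k, the hard top-k mask) is an assumption made for this sketch.

```python
# Illustrative sketch of a generic "select-then-predict" classifier:
# score tokens, keep the top-k as a rationale, classify from the rationale only.
# NOT the RANCC implementation; all names/values here are assumptions.
import torch
import torch.nn as nn


class RationaleClassifier(nn.Module):
    def __init__(self, vocab_size, emb_dim=64, num_classes=2, top_k=5):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, emb_dim)
        self.scorer = nn.Linear(emb_dim, 1)          # per-token relevance score
        self.classifier = nn.Linear(emb_dim, num_classes)
        self.top_k = top_k

    def forward(self, token_ids):
        # token_ids: (batch, seq_len)
        emb = self.embed(token_ids)                   # (B, T, D)
        scores = self.scorer(emb).squeeze(-1)         # (B, T) token importance
        # Keep the k highest-scoring tokens as a hard 0/1 rationale mask.
        # (Real rationale models typically use sampling or a differentiable
        # relaxation so the selector itself can be trained end to end.)
        k = min(self.top_k, token_ids.size(1))
        top_idx = scores.topk(k, dim=1).indices
        mask = torch.zeros_like(scores).scatter_(1, top_idx, 1.0)
        rationale = emb * mask.unsqueeze(-1)          # zero out non-rationale tokens
        pooled = rationale.sum(dim=1) / mask.sum(dim=1, keepdim=True)
        logits = self.classifier(pooled)              # predict from the rationale only
        return logits, scores, mask


# Tiny usage example with random token ids.
model = RationaleClassifier(vocab_size=100)
x = torch.randint(0, 100, (2, 12))
logits, token_scores, rationale_mask = model(x)
print(logits.shape, rationale_mask.sum(dim=1))  # torch.Size([2, 2]) tensor([5., 5.])
```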