@inproceedings{kokalj-etal-2021-bert,
title = "{BERT} meets Shapley: Extending {SHAP} Explanations to Transformer-based Classifiers",
author = "Kokalj, Enja and
{\v{S}}krlj, Bla{\v{z}} and
Lavra{\v{c}}, Nada and
Pollak, Senja and
Robnik-{\v{S}}ikonja, Marko",
editor = "Toivonen, Hannu and
Boggia, Michele",
booktitle = "Proceedings of the EACL Hackashop on News Media Content Analysis and Automated Report Generation",
month = apr,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.hackashop-1.3",
pages = "16--21",
abstract = "Transformer-based neural networks offer very good classification performance across a wide range of domains, but do not provide explanations of their predictions. While several explanation methods, including SHAP, address the problem of interpreting deep learning models, they are not adapted to operate on state-of-the-art transformer-based neural networks such as BERT. Another shortcoming of these methods is that their visualization of explanations in the form of lists of most relevant words does not take into account the sequential and structurally dependent nature of text. This paper proposes the TransSHAP method that adapts SHAP to transformer models including BERT-based text classifiers. It advances SHAP visualizations by showing explanations in a sequential manner, assessed by human evaluators as competitive to state-of-the-art solutions.",
}
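The abstract above describes adapting SHAP to BERT-style classifiers whose inputs are word sequences rather than fixed feature vectors. The sketch below is not the authors' TransSHAP implementation; it only illustrates the underlying idea under assumed names (the distilbert-base-uncased-finetuned-sst-2-english checkpoint and the positive_probability wrapper are illustrative choices): a prediction function that maps binary word-presence masks back to text lets SHAP's KernelExplainer attribute the prediction to individual words, whose contributions can then be reported in their original sentence order.

import numpy as np
import shap
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Assumed off-the-shelf sentiment model; TransSHAP itself targets the authors' own BERT classifiers.
MODEL_NAME = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
model.eval()

sentence = "The sequential visualization makes the prediction much easier to follow"
words = sentence.split()

def positive_probability(masks: np.ndarray) -> np.ndarray:
    """Map binary word-presence masks to the probability of the positive class."""
    texts = [" ".join(w for w, keep in zip(words, row) if keep) for row in masks.astype(bool)]
    enc = tokenizer(texts, return_tensors="pt", padding=True, truncation=True)
    with torch.no_grad():
        logits = model(**enc).logits
    return torch.softmax(logits, dim=-1)[:, 1].numpy()

# Background = the sentence with every word removed; explain the full sentence.
background = np.zeros((1, len(words)))
explainer = shap.KernelExplainer(positive_probability, background)
contributions = explainer.shap_values(np.ones((1, len(words))), nsamples=200)[0]

# Print per-word SHAP contributions in sentence order, mirroring the
# sequential presentation of explanations that the paper argues for.
for word, value in zip(words, contributions):
    print(f"{word:>15s}  {value:+.3f}")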