@inproceedings{zheng-etal-2022-irrationality,
title = "The Irrationality of Neural Rationale Models",
author = "Zheng, Yiming and
Booth, Serena and
Shah, Julie and
Zhou, Yilun",
editor = "Verma, Apurv and
Pruksachatkun, Yada and
Chang, Kai-Wei and
Galstyan, Aram and
Dhamala, Jwala and
Cao, Yang Trista",
booktitle = "Proceedings of the 2nd Workshop on Trustworthy Natural Language Processing (TrustNLP 2022)",
month = jul,
year = "2022",
address = "Seattle, U.S.A.",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.trustnlp-1.6",
doi = "10.18653/v1/2022.trustnlp-1.6",
pages = "64--73",
abstract = "Neural rationale models are popular for interpretable predictions of NLP tasks. In these, a selector extracts segments of the input text, called rationales, and passes these segments to a classifier for prediction. Since the rationale is the only information accessible to the classifier, it is plausibly defined as the explanation. Is such a characterization unconditionally correct? In this paper, we argue to the contrary, with both philosophical perspectives and empirical evidence suggesting that rationale models are, perhaps, less rational and interpretable than expected. We call for more rigorous evaluations of these models to ensure desired properties of interpretability are indeed achieved. The code for our experiments is at \url{https://github.com/yimingz89/Neural-Rationale-Analysis}.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="zheng-etal-2022-irrationality">
    <titleInfo>
      <title>The Irrationality of Neural Rationale Models</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Yiming</namePart>
      <namePart type="family">Zheng</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Serena</namePart>
      <namePart type="family">Booth</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Julie</namePart>
      <namePart type="family">Shah</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yilun</namePart>
      <namePart type="family">Zhou</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2022-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2nd Workshop on Trustworthy Natural Language Processing (TrustNLP 2022)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Apurv</namePart>
        <namePart type="family">Verma</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yada</namePart>
        <namePart type="family">Pruksachatkun</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Kai-Wei</namePart>
        <namePart type="family">Chang</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Aram</namePart>
        <namePart type="family">Galstyan</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Jwala</namePart>
        <namePart type="family">Dhamala</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yang</namePart>
        <namePart type="given">Trista</namePart>
        <namePart type="family">Cao</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Seattle, U.S.A.</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Neural rationale models are popular for interpretable predictions of NLP tasks. In these, a selector extracts segments of the input text, called rationales, and passes these segments to a classifier for prediction. Since the rationale is the only information accessible to the classifier, it is plausibly defined as the explanation. Is such a characterization unconditionally correct? In this paper, we argue to the contrary, with both philosophical perspectives and empirical evidence suggesting that rationale models are, perhaps, less rational and interpretable than expected. We call for more rigorous evaluations of these models to ensure desired properties of interpretability are indeed achieved. The code for our experiments is at https://github.com/yimingz89/Neural-Rationale-Analysis.</abstract>
    <identifier type="citekey">zheng-etal-2022-irrationality</identifier>
    <identifier type="doi">10.18653/v1/2022.trustnlp-1.6</identifier>
    <location>
      <url>https://aclanthology.org/2022.trustnlp-1.6</url>
    </location>
    <part>
      <date>2022-07</date>
      <extent unit="page">
        <start>64</start>
        <end>73</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T The Irrationality of Neural Rationale Models
%A Zheng, Yiming
%A Booth, Serena
%A Shah, Julie
%A Zhou, Yilun
%Y Verma, Apurv
%Y Pruksachatkun, Yada
%Y Chang, Kai-Wei
%Y Galstyan, Aram
%Y Dhamala, Jwala
%Y Cao, Yang Trista
%S Proceedings of the 2nd Workshop on Trustworthy Natural Language Processing (TrustNLP 2022)
%D 2022
%8 July
%I Association for Computational Linguistics
%C Seattle, U.S.A.
%F zheng-etal-2022-irrationality
%X Neural rationale models are popular for interpretable predictions of NLP tasks. In these, a selector extracts segments of the input text, called rationales, and passes these segments to a classifier for prediction. Since the rationale is the only information accessible to the classifier, it is plausibly defined as the explanation. Is such a characterization unconditionally correct? In this paper, we argue to the contrary, with both philosophical perspectives and empirical evidence suggesting that rationale models are, perhaps, less rational and interpretable than expected. We call for more rigorous evaluations of these models to ensure desired properties of interpretability are indeed achieved. The code for our experiments is at https://github.com/yimingz89/Neural-Rationale-Analysis.
%R 10.18653/v1/2022.trustnlp-1.6
%U https://aclanthology.org/2022.trustnlp-1.6
%U https://doi.org/10.18653/v1/2022.trustnlp-1.6
%P 64-73
Markdown (Informal)
[The Irrationality of Neural Rationale Models](https://aclanthology.org/2022.trustnlp-1.6) (Zheng et al., TrustNLP 2022)
ACL
Yiming Zheng, Serena Booth, Julie Shah, and Yilun Zhou. 2022. The Irrationality of Neural Rationale Models. In Proceedings of the 2nd Workshop on Trustworthy Natural Language Processing (TrustNLP 2022), pages 64–73, Seattle, U.S.A. Association for Computational Linguistics.