@inproceedings{liu-etal-2018-visual,
  title     = {Visual Interrogation of Attention-Based Models for Natural Language Inference and Machine Comprehension},
  author    = {Liu, Shusen and Li, Tao and Li, Zhimin and Srikumar, Vivek and Pascucci, Valerio and Bremer, Peer-Timo},
  editor    = {Blanco, Eduardo and Lu, Wei},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = nov,
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/D18-2007},
  doi       = {10.18653/v1/D18-2007},
  pages     = {36--41},
  abstract  = {Neural networks models have gained unprecedented popularity in natural language processing due to their state-of-the-art performance and the flexible end-to-end training scheme. Despite their advantages, the lack of interpretability hinders the deployment and refinement of the models. In this work, we present a flexible visualization library for creating customized visual analytic environments, in which the user can investigate and interrogate the relationships among the input, the model internals (i.e., attention), and the output predictions, which in turn shed light on the model decision-making process.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="liu-etal-2018-visual">
    <titleInfo>
      <title>Visual Interrogation of Attention-Based Models for Natural Language Inference and Machine Comprehension</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Shusen</namePart>
      <namePart type="family">Liu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Tao</namePart>
      <namePart type="family">Li</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Zhimin</namePart>
      <namePart type="family">Li</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Vivek</namePart>
      <namePart type="family">Srikumar</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Valerio</namePart>
      <namePart type="family">Pascucci</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Peer-Timo</namePart>
      <namePart type="family">Bremer</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2018-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Eduardo</namePart>
        <namePart type="family">Blanco</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Wei</namePart>
        <namePart type="family">Lu</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Brussels, Belgium</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Neural networks models have gained unprecedented popularity in natural language processing due to their state-of-the-art performance and the flexible end-to-end training scheme. Despite their advantages, the lack of interpretability hinders the deployment and refinement of the models. In this work, we present a flexible visualization library for creating customized visual analytic environments, in which the user can investigate and interrogate the relationships among the input, the model internals (i.e., attention), and the output predictions, which in turn shed light on the model decision-making process.</abstract>
    <identifier type="citekey">liu-etal-2018-visual</identifier>
    <identifier type="doi">10.18653/v1/D18-2007</identifier>
    <location>
      <url>https://aclanthology.org/D18-2007</url>
    </location>
    <part>
      <date>2018-11</date>
      <extent unit="page">
        <start>36</start>
        <end>41</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Visual Interrogation of Attention-Based Models for Natural Language Inference and Machine Comprehension
%A Liu, Shusen
%A Li, Tao
%A Li, Zhimin
%A Srikumar, Vivek
%A Pascucci, Valerio
%A Bremer, Peer-Timo
%Y Blanco, Eduardo
%Y Lu, Wei
%S Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations
%D 2018
%8 November
%I Association for Computational Linguistics
%C Brussels, Belgium
%F liu-etal-2018-visual
%X Neural networks models have gained unprecedented popularity in natural language processing due to their state-of-the-art performance and the flexible end-to-end training scheme. Despite their advantages, the lack of interpretability hinders the deployment and refinement of the models. In this work, we present a flexible visualization library for creating customized visual analytic environments, in which the user can investigate and interrogate the relationships among the input, the model internals (i.e., attention), and the output predictions, which in turn shed light on the model decision-making process.
%R 10.18653/v1/D18-2007
%U https://aclanthology.org/D18-2007
%U https://doi.org/10.18653/v1/D18-2007
%P 36-41
Markdown (Informal)
[Visual Interrogation of Attention-Based Models for Natural Language Inference and Machine Comprehension](https://aclanthology.org/D18-2007) (Liu et al., EMNLP 2018)
ACL