@inproceedings{shekarpour-etal-2020-qa2explanation,
title = "{QA}2{E}xplanation: Generating and Evaluating Explanations for Question Answering Systems over Knowledge Graph",
author = "Shekarpour, Saeedeh and
Nadgeri, Abhishek and
Singh, Kuldeep",
editor = "Bogin, Ben and
Iyer, Srinivasan and
Lin, Xi Victoria and
Radev, Dragomir and
Suhr, Alane and
Pasupat, Panupong and
Xiong, Caiming and
Yin, Pengcheng and
Yu, Tao and
Zhang, Rui and
Zhong, Victor",
booktitle = "Proceedings of the First Workshop on Interactive and Executable Semantic Parsing",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.intexsempar-1.1",
doi = "10.18653/v1/2020.intexsempar-1.1",
pages = "1--11",
abstract = "In the era of Big Knowledge Graphs, Question Answering (QA) systems have reached a milestone in their performance and feasibility. However, their applicability, particularly in specific domains such as the biomedical domain, has not gained wide acceptance due to their {``}black box{''} nature, which hinders transparency, fairness, and accountability of QA systems. Therefore, users are unable to understand how and why particular questions have been answered, whereas some others fail. To address this challenge, in this paper, we develop an automatic approach for generating explanations during various stages of a pipeline-based QA system. Our approach is a supervised and automatic approach which considers three classes (i.e., success, no answer, and wrong answer) for annotating the output of involved QA components. Upon our prediction, a template explanation is chosen and integrated into the output of the corresponding component. To measure the effectiveness of the approach, we conducted a user survey as to how non-expert users perceive our generated explanations. The results of our study show a significant increase in the four dimensions of the human factor from the Human-computer interaction community.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="shekarpour-etal-2020-qa2explanation">
<titleInfo>
<title>QA2Explanation: Generating and Evaluating Explanations for Question Answering Systems over Knowledge Graph</title>
</titleInfo>
<name type="personal">
<namePart type="given">Saeedeh</namePart>
<namePart type="family">Shekarpour</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Abhishek</namePart>
<namePart type="family">Nadgeri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kuldeep</namePart>
<namePart type="family">Singh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First Workshop on Interactive and Executable Semantic Parsing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ben</namePart>
<namePart type="family">Bogin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Srinivasan</namePart>
<namePart type="family">Iyer</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xi</namePart>
<namePart type="given">Victoria</namePart>
<namePart type="family">Lin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dragomir</namePart>
<namePart type="family">Radev</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alane</namePart>
<namePart type="family">Suhr</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Panupong</namePart>
<namePart type="family">Pasupat</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Caiming</namePart>
<namePart type="family">Xiong</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pengcheng</namePart>
<namePart type="family">Yin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tao</namePart>
<namePart type="family">Yu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rui</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Victor</namePart>
<namePart type="family">Zhong</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In the era of Big Knowledge Graphs, Question Answering (QA) systems have reached a milestone in their performance and feasibility. However, their applicability, particularly in specific domains such as the biomedical domain, has not gained wide acceptance due to their “black box” nature, which hinders the transparency, fairness, and accountability of QA systems. As a result, users cannot understand how and why particular questions are answered while others fail. To address this challenge, we develop an automatic approach for generating explanations at various stages of a pipeline-based QA system. Our supervised approach annotates the output of each involved QA component with one of three classes (i.e., success, no answer, or wrong answer). Based on the predicted class, a template explanation is chosen and integrated into the output of the corresponding component. To measure the effectiveness of the approach, we conducted a user survey of how non-expert users perceive the generated explanations. The results of our study show a significant increase along four human-factor dimensions from the Human-Computer Interaction community.</abstract>
<identifier type="citekey">shekarpour-etal-2020-qa2explanation</identifier>
<identifier type="doi">10.18653/v1/2020.intexsempar-1.1</identifier>
<location>
<url>https://aclanthology.org/2020.intexsempar-1.1</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>1</start>
<end>11</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T QA2Explanation: Generating and Evaluating Explanations for Question Answering Systems over Knowledge Graph
%A Shekarpour, Saeedeh
%A Nadgeri, Abhishek
%A Singh, Kuldeep
%Y Bogin, Ben
%Y Iyer, Srinivasan
%Y Lin, Xi Victoria
%Y Radev, Dragomir
%Y Suhr, Alane
%Y Xiong, Caiming
%Y Yin, Pengcheng
%Y Yu, Tao
%Y Zhang, Rui
%Y Zhong, Victor
%Y Pasupat, Panupong
%S Proceedings of the First Workshop on Interactive and Executable Semantic Parsing
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F shekarpour-etal-2020-qa2explanation
%X In the era of Big Knowledge Graphs, Question Answering (QA) systems have reached a milestone in their performance and feasibility. However, their applicability, particularly in specific domains such as the biomedical domain, has not gained wide acceptance due to their “black box” nature, which hinders the transparency, fairness, and accountability of QA systems. As a result, users cannot understand how and why particular questions are answered while others fail. To address this challenge, we develop an automatic approach for generating explanations at various stages of a pipeline-based QA system. Our supervised approach annotates the output of each involved QA component with one of three classes (i.e., success, no answer, or wrong answer). Based on the predicted class, a template explanation is chosen and integrated into the output of the corresponding component. To measure the effectiveness of the approach, we conducted a user survey of how non-expert users perceive the generated explanations. The results of our study show a significant increase along four human-factor dimensions from the Human-Computer Interaction community.
%R 10.18653/v1/2020.intexsempar-1.1
%U https://aclanthology.org/2020.intexsempar-1.1
%U https://doi.org/10.18653/v1/2020.intexsempar-1.1
%P 1-11
Markdown (Informal)
[QA2Explanation: Generating and Evaluating Explanations for Question Answering Systems over Knowledge Graph](https://aclanthology.org/2020.intexsempar-1.1) (Shekarpour et al., IntEx-SemPar 2020)
ACL
Saeedeh Shekarpour, Abhishek Nadgeri, and Kuldeep Singh. 2020. [QA2Explanation: Generating and Evaluating Explanations for Question Answering Systems over Knowledge Graph](https://aclanthology.org/2020.intexsempar-1.1). In *Proceedings of the First Workshop on Interactive and Executable Semantic Parsing*, pages 1–11, Online. Association for Computational Linguistics.
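
The abstract describes a supervised pipeline in which each QA component's output is labeled with one of three classes (success, no answer, wrong answer) and the predicted class selects a template explanation. Below is a minimal sketch of that template-selection step; the class labels, template strings, and function names are illustrative assumptions, not the authors' implementation.

```python
# Hypothetical sketch of the template-based explanation step the abstract
# describes: a supervised classifier labels each QA component's output as
# success / no answer / wrong answer, and the predicted label selects a
# canned explanation template attached to that component's output.
# All labels, templates, and names here are illustrative assumptions.

TEMPLATES = {
    "success": "The {component} succeeded and returned: {output}.",
    "no_answer": "The {component} could not find an answer to your question.",
    "wrong_answer": "The {component} probably returned a wrong answer: {output}.",
}

def explain(component: str, predicted_class: str, output: str = "") -> str:
    """Pick the template for a predicted class and fill in component details."""
    try:
        template = TEMPLATES[predicted_class]
    except KeyError:
        raise ValueError(f"unknown class: {predicted_class!r}") from None
    return template.format(component=component, output=output)

if __name__ == "__main__":
    # e.g. the entity-linking stage of a pipeline-based KGQA system
    print(explain("entity linker", "no_answer"))
```

Keying templates to a small, closed set of status classes is what makes the approach automatic: once the classifier predicts a class for a component, explanation generation reduces to a dictionary lookup and string fill.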