@inproceedings{krause-vossen-2020-explain,
title = "When to explain: Identifying explanation triggers in human-agent interaction",
author = "Krause, Lea and
Vossen, Piek",
editor = "Alonso, Jose M. and
Catala, Alejandro",
booktitle = "2nd Workshop on Interactive Natural Language Technology for Explainable Artificial Intelligence",
month = nov,
year = "2020",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.nl4xai-1.12",
pages = "55--60",
abstract = "With more agents deployed than ever, users need to be able to interact and cooperate with them in an effective and comfortable manner. Explanations have been shown to increase the understanding and trust of a user in human-agent interaction. There have been numerous studies investigating this effect, but they rely on the user explicitly requesting an explanation. We propose a first overview of when an explanation should be triggered and show that there are many instances that would be missed if the agent solely relies on direct questions. For this, we differentiate between direct triggers such as commands or questions and introduce indirect triggers like confusion or uncertainty detection.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="krause-vossen-2020-explain">
    <titleInfo>
      <title>When to explain: Identifying explanation triggers in human-agent interaction</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Lea</namePart>
      <namePart type="family">Krause</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Piek</namePart>
      <namePart type="family">Vossen</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>2nd Workshop on Interactive Natural Language Technology for Explainable Artificial Intelligence</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Jose</namePart>
        <namePart type="given">M</namePart>
        <namePart type="family">Alonso</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Alejandro</namePart>
        <namePart type="family">Catala</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Dublin, Ireland</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>With more agents deployed than ever, users need to be able to interact and cooperate with them in an effective and comfortable manner. Explanations have been shown to increase the understanding and trust of a user in human-agent interaction. There have been numerous studies investigating this effect, but they rely on the user explicitly requesting an explanation. We propose a first overview of when an explanation should be triggered and show that there are many instances that would be missed if the agent solely relies on direct questions. For this, we differentiate between direct triggers such as commands or questions and introduce indirect triggers like confusion or uncertainty detection.</abstract>
    <identifier type="citekey">krause-vossen-2020-explain</identifier>
    <location>
      <url>https://aclanthology.org/2020.nl4xai-1.12</url>
    </location>
    <part>
      <date>2020-11</date>
      <extent unit="page">
        <start>55</start>
        <end>60</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T When to explain: Identifying explanation triggers in human-agent interaction
%A Krause, Lea
%A Vossen, Piek
%Y Alonso, Jose M.
%Y Catala, Alejandro
%S 2nd Workshop on Interactive Natural Language Technology for Explainable Artificial Intelligence
%D 2020
%8 November
%I Association for Computational Linguistics
%C Dublin, Ireland
%F krause-vossen-2020-explain
%X With more agents deployed than ever, users need to be able to interact and cooperate with them in an effective and comfortable manner. Explanations have been shown to increase the understanding and trust of a user in human-agent interaction. There have been numerous studies investigating this effect, but they rely on the user explicitly requesting an explanation. We propose a first overview of when an explanation should be triggered and show that there are many instances that would be missed if the agent solely relies on direct questions. For this, we differentiate between direct triggers such as commands or questions and introduce indirect triggers like confusion or uncertainty detection.
%U https://aclanthology.org/2020.nl4xai-1.12
%P 55-60
Markdown (Informal)
[When to explain: Identifying explanation triggers in human-agent interaction](https://aclanthology.org/2020.nl4xai-1.12) (Krause & Vossen, NL4XAI 2020)
ACL
Lea Krause and Piek Vossen. 2020. When to explain: Identifying explanation triggers in human-agent interaction. In 2nd Workshop on Interactive Natural Language Technology for Explainable Artificial Intelligence, pages 55–60, Dublin, Ireland. Association for Computational Linguistics.