@inproceedings{tulli-etal-2020-learning,
title = "Learning from Explanations and Demonstrations: A Pilot Study",
author = {Tulli, Silvia and
Wallk{\"o}tter, Sebastian and
Paiva, Ana and
Melo, Francisco S. and
Chetouani, Mohamed},
booktitle = "2nd Workshop on Interactive Natural Language Technology for Explainable Artificial Intelligence",
month = nov,
year = "2020",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.nl4xai-1.13",
pages = "61--66",
    abstract = "AI has become prominent in a growing number of systems, and, as a direct consequence, the desire for explainability in such systems has become prominent as well. To build explainable systems, a large portion of existing research uses various kinds of natural language technologies, e.g., text-to-speech mechanisms or string visualizations. Here, we provide an overview of the challenges associated with natural language explanations by reviewing existing literature. Additionally, we discuss the relationship between explainability and knowledge transfer in reinforcement learning. We argue that explainability methods, in particular methods that model the recipient of an explanation, might help increase sample efficiency. To this end, we present a computational approach to optimize the learner{'}s performance using explanations from another agent and discuss our results in light of effective natural language explanations for humans.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="tulli-etal-2020-learning">
<titleInfo>
<title>Learning from Explanations and Demonstrations: A Pilot Study</title>
</titleInfo>
<name type="personal">
<namePart type="given">Silvia</namePart>
<namePart type="family">Tulli</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sebastian</namePart>
<namePart type="family">Wallkötter</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ana</namePart>
<namePart type="family">Paiva</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Francisco</namePart>
<namePart type="given">S</namePart>
<namePart type="family">Melo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohamed</namePart>
<namePart type="family">Chetouani</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>2nd Workshop on Interactive Natural Language Technology for Explainable Artificial Intelligence</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dublin, Ireland</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>AI has become prominent in a growing number of systems, and, as a direct consequence, the desire for explainability in such systems has become prominent as well. To build explainable systems, a large portion of existing research uses various kinds of natural language technologies, e.g., text-to-speech mechanisms or string visualizations. Here, we provide an overview of the challenges associated with natural language explanations by reviewing existing literature. Additionally, we discuss the relationship between explainability and knowledge transfer in reinforcement learning. We argue that explainability methods, in particular methods that model the recipient of an explanation, might help increase sample efficiency. To this end, we present a computational approach to optimize the learner’s performance using explanations from another agent and discuss our results in light of effective natural language explanations for humans.</abstract>
<identifier type="citekey">tulli-etal-2020-learning</identifier>
<location>
<url>https://aclanthology.org/2020.nl4xai-1.13</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>61</start>
<end>66</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Learning from Explanations and Demonstrations: A Pilot Study
%A Tulli, Silvia
%A Wallkötter, Sebastian
%A Paiva, Ana
%A Melo, Francisco S.
%A Chetouani, Mohamed
%S 2nd Workshop on Interactive Natural Language Technology for Explainable Artificial Intelligence
%D 2020
%8 November
%I Association for Computational Linguistics
%C Dublin, Ireland
%F tulli-etal-2020-learning
%X AI has become prominent in a growing number of systems, and, as a direct consequence, the desire for explainability in such systems has become prominent as well. To build explainable systems, a large portion of existing research uses various kinds of natural language technologies, e.g., text-to-speech mechanisms or string visualizations. Here, we provide an overview of the challenges associated with natural language explanations by reviewing existing literature. Additionally, we discuss the relationship between explainability and knowledge transfer in reinforcement learning. We argue that explainability methods, in particular methods that model the recipient of an explanation, might help increase sample efficiency. To this end, we present a computational approach to optimize the learner’s performance using explanations from another agent and discuss our results in light of effective natural language explanations for humans.
%U https://aclanthology.org/2020.nl4xai-1.13
%P 61-66
Markdown (Informal):
[Learning from Explanations and Demonstrations: A Pilot Study](https://aclanthology.org/2020.nl4xai-1.13) (Tulli et al., NL4XAI 2020)

ACL:
Silvia Tulli, Sebastian Wallkötter, Ana Paiva, Francisco S. Melo, and Mohamed Chetouani. 2020. Learning from Explanations and Demonstrations: A Pilot Study. In 2nd Workshop on Interactive Natural Language Technology for Explainable Artificial Intelligence, pages 61–66, Dublin, Ireland. Association for Computational Linguistics.