@inproceedings{chiyahgarcia-EtAl:2018:W18-65,
  author    = {Chiyah Garcia, Francisco Javier and Robb, David A. and Liu, Xingkun and Laskov, Atanas and Patron, Pedro and Hastie, Helen},
  title     = {Explainable Autonomy: A Study of Explanation Styles for Building Clear Mental Models},
  booktitle = {Proceedings of the 11th International Conference on Natural Language Generation},
  month     = sep,
  year      = {2018},
  address   = {Tilburg University, The Netherlands},
  publisher = {Association for Computational Linguistics},
  pages     = {99--108},
  abstract  = {As vehicles become more autonomous, it is important to maintain a level of transparency about their behaviour and how they work. This is particularly important in remote locations where they cannot be observed. Here, we describe a natural language chat interface that enables the reasoning behind the behaviour of underwater vehicles to be queried. We do this by deriving an interpretable model of autonomy through having an expert `speak out-loud' and provide various levels of detail based on this model. We corroborate previous research that has shown that it is important to inform the user of all possible explanations (high completeness) for improving the user's general mental model of how a system works. For understanding specific behaviours, a high level of completeness is similarly important, however, we show it is better to have the multiple explanations worded in general terms (low soundness). This work has implications for designing interfaces for autonomy as well as for explainable AI and operator training.},
  doi       = {10.18653/v1/W18-6511},
  url       = {http://www.aclweb.org/anthology/W18-6511},
  internal-note = {NOTE(review): address holds the conference venue (ACL Anthology export convention), not the publisher city -- confirm against target style before moving to venue/eventtitle},
}

