@inproceedings{kunz-kuhlmann-2024-properties,
title = "Properties and Challenges of {LLM}-Generated Explanations",
author = "Kunz, Jenny and
Kuhlmann, Marco",
editor = "Blodgett, Su Lin and
Curry, Amanda Cercas and
Dev, Sunipa and
Madaio, Michael and
Nenkova, Ani and
Yang, Diyi and
Xiao, Ziang",
booktitle = "Proceedings of the Third Workshop on Bridging Human--Computer Interaction and Natural Language Processing",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.hcinlp-1.2",
doi = "10.18653/v1/2024.hcinlp-1.2",
pages = "13--27",
abstract = "The self-rationalising capabilities of large language models (LLMs) have been explored in restricted settings, using task-specific data sets. However, current LLMs do not (only) rely on specifically annotated data; nonetheless, they frequently explain their outputs. The properties of the generated explanations are influenced by the pre-training corpus and by the target data used for instruction fine-tuning. As the pre-training corpus includes a large amount of human-written explanations {``}in the wild{''}, we hypothesise that LLMs adopt common properties of human explanations. By analysing the outputs for a multi-domain instruction fine-tuning data set, we find that generated explanations show selectivity and contain illustrative elements, but less frequently are subjective or misleading. We discuss reasons and consequences of the properties{'} presence or absence. In particular, we outline positive and negative implications depending on the goals and user groups of the self-rationalising system.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kunz-kuhlmann-2024-properties">
<titleInfo>
<title>Properties and Challenges of LLM-Generated Explanations</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jenny</namePart>
<namePart type="family">Kunz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marco</namePart>
<namePart type="family">Kuhlmann</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Third Workshop on Bridging Human–Computer Interaction and Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Su</namePart>
<namePart type="given">Lin</namePart>
<namePart type="family">Blodgett</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Amanda</namePart>
<namePart type="given">Cercas</namePart>
<namePart type="family">Curry</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sunipa</namePart>
<namePart type="family">Dev</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Michael</namePart>
<namePart type="family">Madaio</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ani</namePart>
<namePart type="family">Nenkova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Diyi</namePart>
<namePart type="family">Yang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ziang</namePart>
<namePart type="family">Xiao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Mexico City, Mexico</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The self-rationalising capabilities of large language models (LLMs) have been explored in restricted settings, using task-specific data sets. However, current LLMs do not (only) rely on specifically annotated data; nonetheless, they frequently explain their outputs. The properties of the generated explanations are influenced by the pre-training corpus and by the target data used for instruction fine-tuning. As the pre-training corpus includes a large amount of human-written explanations “in the wild”, we hypothesise that LLMs adopt common properties of human explanations. By analysing the outputs for a multi-domain instruction fine-tuning data set, we find that generated explanations show selectivity and contain illustrative elements, but less frequently are subjective or misleading. We discuss reasons and consequences of the properties’ presence or absence. In particular, we outline positive and negative implications depending on the goals and user groups of the self-rationalising system.</abstract>
<identifier type="citekey">kunz-kuhlmann-2024-properties</identifier>
<identifier type="doi">10.18653/v1/2024.hcinlp-1.2</identifier>
<location>
<url>https://aclanthology.org/2024.hcinlp-1.2</url>
</location>
<part>
<date>2024-06</date>
<extent unit="page">
<start>13</start>
<end>27</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Properties and Challenges of LLM-Generated Explanations
%A Kunz, Jenny
%A Kuhlmann, Marco
%Y Blodgett, Su Lin
%Y Curry, Amanda Cercas
%Y Dev, Sunipa
%Y Madaio, Michael
%Y Nenkova, Ani
%Y Yang, Diyi
%Y Xiao, Ziang
%S Proceedings of the Third Workshop on Bridging Human–Computer Interaction and Natural Language Processing
%D 2024
%8 June
%I Association for Computational Linguistics
%C Mexico City, Mexico
%F kunz-kuhlmann-2024-properties
%X The self-rationalising capabilities of large language models (LLMs) have been explored in restricted settings, using task-specific data sets. However, current LLMs do not (only) rely on specifically annotated data; nonetheless, they frequently explain their outputs. The properties of the generated explanations are influenced by the pre-training corpus and by the target data used for instruction fine-tuning. As the pre-training corpus includes a large amount of human-written explanations “in the wild”, we hypothesise that LLMs adopt common properties of human explanations. By analysing the outputs for a multi-domain instruction fine-tuning data set, we find that generated explanations show selectivity and contain illustrative elements, but less frequently are subjective or misleading. We discuss reasons and consequences of the properties’ presence or absence. In particular, we outline positive and negative implications depending on the goals and user groups of the self-rationalising system.
%R 10.18653/v1/2024.hcinlp-1.2
%U https://aclanthology.org/2024.hcinlp-1.2
%U https://doi.org/10.18653/v1/2024.hcinlp-1.2
%P 13-27
Markdown (Informal)
[Properties and Challenges of LLM-Generated Explanations](https://aclanthology.org/2024.hcinlp-1.2) (Kunz & Kuhlmann, HCINLP-WS 2024)
ACL
- Jenny Kunz and Marco Kuhlmann. 2024. Properties and Challenges of LLM-Generated Explanations. In Proceedings of the Third Workshop on Bridging Human–Computer Interaction and Natural Language Processing, pages 13–27, Mexico City, Mexico. Association for Computational Linguistics.