@inproceedings{zukerman-maruf-2024-communicating-uncertainty,
  title     = {Communicating Uncertainty in Explanations of the Outcomes of Machine Learning Models},
  author    = {Zukerman, Ingrid and
               Maruf, Sameen},
  editor    = {Mahamood, Saad and
               Minh, Nguyen Le and
               Ippolito, Daphne},
  booktitle = {Proceedings of the 17th International Natural Language Generation Conference},
  month     = sep,
  year      = {2024},
  address   = {Tokyo, Japan},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.inlg-main.4},
  pages     = {30--46},
  abstract  = {We consider two types of numeric representations for conveying the uncertainty of predictions made by Machine Learning (ML) models: confidence-based (e.g., {``}the AI is 90{\%} confident{''}) and frequency-based (e.g., {``}the AI was correct in 180 (90{\%}) out of 200 cases{''}). We conducted a user study to determine which factors influence users{'} acceptance of predictions made by ML models, and how the two types of uncertainty representations affect users{'} views about explanations. Our results show that users{'} acceptance of ML model predictions depends mainly on the models{'} confidence, and that explanations that include uncertainty information are deemed better in several respects than explanations that omit it, with frequency-based representations being deemed better than confidence-based representations.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zukerman-maruf-2024-communicating-uncertainty">
<titleInfo>
<title>Communicating Uncertainty in Explanations of the Outcomes of Machine Learning Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ingrid</namePart>
<namePart type="family">Zukerman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sameen</namePart>
<namePart type="family">Maruf</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 17th International Natural Language Generation Conference</title>
</titleInfo>
<name type="personal">
<namePart type="given">Saad</namePart>
<namePart type="family">Mahamood</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nguyen</namePart>
<namePart type="given">Le</namePart>
<namePart type="family">Minh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Daphne</namePart>
<namePart type="family">Ippolito</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Tokyo, Japan</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We consider two types of numeric representations for conveying the uncertainty of predictions made by Machine Learning (ML) models: confidence-based (e.g., “the AI is 90% confident”) and frequency-based (e.g., “the AI was correct in 180 (90%) out of 200 cases”). We conducted a user study to determine which factors influence users’ acceptance of predictions made by ML models, and how the two types of uncertainty representations affect users’ views about explanations. Our results show that users’ acceptance of ML model predictions depends mainly on the models’ confidence, and that explanations that include uncertainty information are deemed better in several respects than explanations that omit it, with frequency-based representations being deemed better than confidence-based representations.</abstract>
<identifier type="citekey">zukerman-maruf-2024-communicating-uncertainty</identifier>
<location>
<url>https://aclanthology.org/2024.inlg-main.4</url>
</location>
<part>
<date>2024-09</date>
<extent unit="page">
<start>30</start>
<end>46</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Communicating Uncertainty in Explanations of the Outcomes of Machine Learning Models
%A Zukerman, Ingrid
%A Maruf, Sameen
%Y Mahamood, Saad
%Y Minh, Nguyen Le
%Y Ippolito, Daphne
%S Proceedings of the 17th International Natural Language Generation Conference
%D 2024
%8 September
%I Association for Computational Linguistics
%C Tokyo, Japan
%F zukerman-maruf-2024-communicating-uncertainty
%X We consider two types of numeric representations for conveying the uncertainty of predictions made by Machine Learning (ML) models: confidence-based (e.g., “the AI is 90% confident”) and frequency-based (e.g., “the AI was correct in 180 (90%) out of 200 cases”). We conducted a user study to determine which factors influence users’ acceptance of predictions made by ML models, and how the two types of uncertainty representations affect users’ views about explanations. Our results show that users’ acceptance of ML model predictions depends mainly on the models’ confidence, and that explanations that include uncertainty information are deemed better in several respects than explanations that omit it, with frequency-based representations being deemed better than confidence-based representations.
%U https://aclanthology.org/2024.inlg-main.4
%P 30-46
Markdown (Informal)
[Communicating Uncertainty in Explanations of the Outcomes of Machine Learning Models](https://aclanthology.org/2024.inlg-main.4) (Zukerman & Maruf, INLG 2024)
ACL