@inproceedings{joshi-2025-evaluating,
title = "Evaluating Human Perception and Bias in {AI}-Generated Humor",
author = "Joshi, Narendra Nath",
editor = "Hempelmann, Christian F. and
Rayz, Julia and
Dong, Tiansi and
Miller, Tristan",
booktitle = "Proceedings of the 1st Workshop on Computational Humor (CHum)",
month = jan,
year = "2025",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.chum-1.9/",
pages = "79--87",
    abstract = "This paper explores human perception of AI-generated humor, examining biases and the ability to distinguish between human and AI-created jokes. Through a between-subjects user study involving 174 participants, we tested hypotheses on quality perception, source identification, and demographic influences. Our findings reveal that AI-generated jokes are rated comparably to human-generated ones, with source blindness improving AI humor ratings. Participants struggled to identify AI-generated jokes accurately, and repeated exposure led to increased appreciation. Younger participants showed more favorable perceptions, while technical background had no significant impact. These results challenge preconceptions about AI's humor capabilities and highlight the importance of addressing biases in AI content evaluation. We also suggest pathways for enhancing human-AI creative collaboration and underscore the need for transparency and ethical considerations in AI-generated content."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="joshi-2025-evaluating">
    <titleInfo>
      <title>Evaluating Human Perception and Bias in AI-Generated Humor</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Narendra</namePart>
      <namePart type="given">Nath</namePart>
      <namePart type="family">Joshi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2025-01</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 1st Workshop on Computational Humor (CHum)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Christian</namePart>
        <namePart type="given">F</namePart>
        <namePart type="family">Hempelmann</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Julia</namePart>
        <namePart type="family">Rayz</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Tiansi</namePart>
        <namePart type="family">Dong</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Tristan</namePart>
        <namePart type="family">Miller</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>This paper explores human perception of AI-generated humor, examining biases and the ability to distinguish between human and AI-created jokes. Through a between-subjects user study involving 174 participants, we tested hypotheses on quality perception, source identification, and demographic influences. Our findings reveal that AI-generated jokes are rated comparably to human-generated ones, with source blindness improving AI humor ratings. Participants struggled to identify AI-generated jokes accurately, and repeated exposure led to increased appreciation. Younger participants showed more favorable perceptions, while technical background had no significant impact. These results challenge preconceptions about AI's humor capabilities and highlight the importance of addressing biases in AI content evaluation. We also suggest pathways for enhancing human-AI creative collaboration and underscore the need for transparency and ethical considerations in AI-generated content.</abstract>
    <identifier type="citekey">joshi-2025-evaluating</identifier>
    <location>
      <url>https://aclanthology.org/2025.chum-1.9/</url>
    </location>
    <part>
      <date>2025-01</date>
      <extent unit="page">
        <start>79</start>
        <end>87</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Evaluating Human Perception and Bias in AI-Generated Humor
%A Joshi, Narendra Nath
%Y Hempelmann, Christian F.
%Y Rayz, Julia
%Y Dong, Tiansi
%Y Miller, Tristan
%S Proceedings of the 1st Workshop on Computational Humor (CHum)
%D 2025
%8 January
%I Association for Computational Linguistics
%C Online
%F joshi-2025-evaluating
%X This paper explores human perception of AI-generated humor, examining biases and the ability to distinguish between human and AI-created jokes. Through a between-subjects user study involving 174 participants, we tested hypotheses on quality perception, source identification, and demographic influences. Our findings reveal that AI-generated jokes are rated comparably to human-generated ones, with source blindness improving AI humor ratings. Participants struggled to identify AI-generated jokes accurately, and repeated exposure led to increased appreciation. Younger participants showed more favorable perceptions, while technical background had no significant impact. These results challenge preconceptions about AI's humor capabilities and highlight the importance of addressing biases in AI content evaluation. We also suggest pathways for enhancing human-AI creative collaboration and underscore the need for transparency and ethical considerations in AI-generated content.
%U https://aclanthology.org/2025.chum-1.9/
%P 79-87
Markdown (Informal)
[Evaluating Human Perception and Bias in AI-Generated Humor](https://aclanthology.org/2025.chum-1.9/) (Joshi, CHum 2025)
ACL
Narendra Nath Joshi. 2025. Evaluating Human Perception and Bias in AI-Generated Humor. In Proceedings of the 1st Workshop on Computational Humor (CHum), pages 79–87, Online. Association for Computational Linguistics.