@inproceedings{gunser-etal-2022-pure,
title = "The Pure Poet: How Good is the Subjective Credibility and Stylistic Quality of Literary Short Texts Written with an Artificial Intelligence Tool as Compared to Texts Written by Human Authors?",
author = "Gunser, Vivian Emily and
Gottschling, Steffen and
Brucker, Birgit and
Richter, Sandra and
{\c{C}}akir, D{\^\i}lan Canan and
Gerjets, Peter",
editor = "Huang, Ting-Hao 'Kenneth' and
Raheja, Vipul and
Kang, Dongyeop and
Chung, John Joon Young and
Gissin, Daniel and
Lee, Mina and
Gero, Katy Ilonka",
booktitle = "Proceedings of the First Workshop on Intelligent and Interactive Writing Assistants (In2Writing 2022)",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.in2writing-1.8",
doi = "10.18653/v1/2022.in2writing-1.8",
pages = "60--61",
abstract = "The application of artificial intelligence (AI) for text generation in creative domains raises questions regarding the credibility of AI-generated content. In two studies, we explored if readers can differentiate between AI-based and human-written texts (generated based on the first line of texts and poems of classic authors) and how the stylistic qualities of these texts are rated. Participants read 9 AI-based continuations and either 9 human-written continuations (Study 1, N=120) or 9 original continuations (Study 2, N=302). Participants{'} task was to decide whether a continuation was written with an AI-tool or not, to indicate their confidence in each decision, and to assess the stylistic text quality. Results showed that participants generally had low accuracy for differentiating between text types but were overconfident in their decisions. Regarding the assessment of stylistic quality, AI-continuations were perceived as less well-written, inspiring, fascinating, interesting, and aesthetic than both human-written and original continuations.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="gunser-etal-2022-pure">
<titleInfo>
<title>The Pure Poet: How Good is the Subjective Credibility and Stylistic Quality of Literary Short Texts Written with an Artificial Intelligence Tool as Compared to Texts Written by Human Authors?</title>
</titleInfo>
<name type="personal">
<namePart type="given">Vivian</namePart>
<namePart type="given">Emily</namePart>
<namePart type="family">Gunser</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Steffen</namePart>
<namePart type="family">Gottschling</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Birgit</namePart>
<namePart type="family">Brucker</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sandra</namePart>
<namePart type="family">Richter</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dîlan</namePart>
<namePart type="given">Canan</namePart>
<namePart type="family">Çakir</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Peter</namePart>
<namePart type="family">Gerjets</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First Workshop on Intelligent and Interactive Writing Assistants (In2Writing 2022)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ting-Hao</namePart>
<namePart type="given">’Kenneth’</namePart>
<namePart type="family">Huang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vipul</namePart>
<namePart type="family">Raheja</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dongyeop</namePart>
<namePart type="family">Kang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">John</namePart>
<namePart type="given">Joon</namePart>
<namePart type="given">Young</namePart>
<namePart type="family">Chung</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Daniel</namePart>
<namePart type="family">Gissin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mina</namePart>
<namePart type="family">Lee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Katy</namePart>
<namePart type="given">Ilonka</namePart>
<namePart type="family">Gero</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dublin, Ireland</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The application of artificial intelligence (AI) for text generation in creative domains raises questions regarding the credibility of AI-generated content. In two studies, we explored if readers can differentiate between AI-based and human-written texts (generated based on the first line of texts and poems of classic authors) and how the stylistic qualities of these texts are rated. Participants read 9 AI-based continuations and either 9 human-written continuations (Study 1, N=120) or 9 original continuations (Study 2, N=302). Participants’ task was to decide whether a continuation was written with an AI-tool or not, to indicate their confidence in each decision, and to assess the stylistic text quality. Results showed that participants generally had low accuracy for differentiating between text types but were overconfident in their decisions. Regarding the assessment of stylistic quality, AI-continuations were perceived as less well-written, inspiring, fascinating, interesting, and aesthetic than both human-written and original continuations.</abstract>
<identifier type="citekey">gunser-etal-2022-pure</identifier>
<identifier type="doi">10.18653/v1/2022.in2writing-1.8</identifier>
<location>
<url>https://aclanthology.org/2022.in2writing-1.8</url>
</location>
<part>
<date>2022-05</date>
<extent unit="page">
<start>60</start>
<end>61</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T The Pure Poet: How Good is the Subjective Credibility and Stylistic Quality of Literary Short Texts Written with an Artificial Intelligence Tool as Compared to Texts Written by Human Authors?
%A Gunser, Vivian Emily
%A Gottschling, Steffen
%A Brucker, Birgit
%A Richter, Sandra
%A Çakir, Dîlan Canan
%A Gerjets, Peter
%Y Huang, Ting-Hao 'Kenneth'
%Y Raheja, Vipul
%Y Kang, Dongyeop
%Y Chung, John Joon Young
%Y Gissin, Daniel
%Y Lee, Mina
%Y Gero, Katy Ilonka
%S Proceedings of the First Workshop on Intelligent and Interactive Writing Assistants (In2Writing 2022)
%D 2022
%8 May
%I Association for Computational Linguistics
%C Dublin, Ireland
%F gunser-etal-2022-pure
%X The application of artificial intelligence (AI) for text generation in creative domains raises questions regarding the credibility of AI-generated content. In two studies, we explored if readers can differentiate between AI-based and human-written texts (generated based on the first line of texts and poems of classic authors) and how the stylistic qualities of these texts are rated. Participants read 9 AI-based continuations and either 9 human-written continuations (Study 1, N=120) or 9 original continuations (Study 2, N=302). Participants’ task was to decide whether a continuation was written with an AI-tool or not, to indicate their confidence in each decision, and to assess the stylistic text quality. Results showed that participants generally had low accuracy for differentiating between text types but were overconfident in their decisions. Regarding the assessment of stylistic quality, AI-continuations were perceived as less well-written, inspiring, fascinating, interesting, and aesthetic than both human-written and original continuations.
%R 10.18653/v1/2022.in2writing-1.8
%U https://aclanthology.org/2022.in2writing-1.8
%U https://doi.org/10.18653/v1/2022.in2writing-1.8
%P 60-61
Markdown (Informal)
[The Pure Poet: How Good is the Subjective Credibility and Stylistic Quality of Literary Short Texts Written with an Artificial Intelligence Tool as Compared to Texts Written by Human Authors?](https://aclanthology.org/2022.in2writing-1.8) (Gunser et al., In2Writing 2022)
ACL
Vivian Emily Gunser, Steffen Gottschling, Birgit Brucker, Sandra Richter, Dîlan Canan Çakir, and Peter Gerjets. 2022. [The Pure Poet: How Good is the Subjective Credibility and Stylistic Quality of Literary Short Texts Written with an Artificial Intelligence Tool as Compared to Texts Written by Human Authors?](https://aclanthology.org/2022.in2writing-1.8). In *Proceedings of the First Workshop on Intelligent and Interactive Writing Assistants (In2Writing 2022)*, pages 60–61, Dublin, Ireland. Association for Computational Linguistics.