BibTeX

@inproceedings{lucy-bamman-2021-gender,
    title = "Gender and Representation Bias in {GPT}-3 Generated Stories",
    author = "Lucy, Li and
      Bamman, David",
    editor = "Akoury, Nader and
      Brahman, Faeze and
      Chaturvedi, Snigdha and
      Clark, Elizabeth and
      Iyyer, Mohit and
      Martin, Lara J.",
    booktitle = "Proceedings of the Third Workshop on Narrative Understanding",
    month = jun,
    year = "2021",
    address = "Virtual",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.nuse-1.5",
    doi = "10.18653/v1/2021.nuse-1.5",
    pages = "48--55",
    abstract = "Using topic modeling and lexicon-based word similarity, we find that stories generated by GPT-3 exhibit many known gender stereotypes. Generated stories depict different topics and descriptions depending on GPT-3{'}s perceived gender of the character in a prompt, with feminine characters more likely to be associated with family and appearance, and described as less powerful than masculine characters, even when associated with high power verbs in a prompt. Our study raises questions on how one can avoid unintended social biases when using large language models for storytelling.",
}

MODS XML

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="lucy-bamman-2021-gender">
    <titleInfo>
      <title>Gender and Representation Bias in GPT-3 Generated Stories</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Li</namePart>
      <namePart type="family">Lucy</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">David</namePart>
      <namePart type="family">Bamman</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-06</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Third Workshop on Narrative Understanding</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Nader</namePart>
        <namePart type="family">Akoury</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Faeze</namePart>
        <namePart type="family">Brahman</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Snigdha</namePart>
        <namePart type="family">Chaturvedi</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Elizabeth</namePart>
        <namePart type="family">Clark</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Mohit</namePart>
        <namePart type="family">Iyyer</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Lara</namePart>
        <namePart type="given">J</namePart>
        <namePart type="family">Martin</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Virtual</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Using topic modeling and lexicon-based word similarity, we find that stories generated by GPT-3 exhibit many known gender stereotypes. Generated stories depict different topics and descriptions depending on GPT-3’s perceived gender of the character in a prompt, with feminine characters more likely to be associated with family and appearance, and described as less powerful than masculine characters, even when associated with high power verbs in a prompt. Our study raises questions on how one can avoid unintended social biases when using large language models for storytelling.</abstract>
    <identifier type="citekey">lucy-bamman-2021-gender</identifier>
    <identifier type="doi">10.18653/v1/2021.nuse-1.5</identifier>
    <location>
      <url>https://aclanthology.org/2021.nuse-1.5</url>
    </location>
    <part>
      <date>2021-06</date>
      <extent unit="page">
        <start>48</start>
        <end>55</end>
      </extent>
    </part>
  </mods>
</modsCollection>

Endnote
%0 Conference Proceedings
%T Gender and Representation Bias in GPT-3 Generated Stories
%A Lucy, Li
%A Bamman, David
%Y Akoury, Nader
%Y Brahman, Faeze
%Y Chaturvedi, Snigdha
%Y Clark, Elizabeth
%Y Iyyer, Mohit
%Y Martin, Lara J.
%S Proceedings of the Third Workshop on Narrative Understanding
%D 2021
%8 June
%I Association for Computational Linguistics
%C Virtual
%F lucy-bamman-2021-gender
%X Using topic modeling and lexicon-based word similarity, we find that stories generated by GPT-3 exhibit many known gender stereotypes. Generated stories depict different topics and descriptions depending on GPT-3’s perceived gender of the character in a prompt, with feminine characters more likely to be associated with family and appearance, and described as less powerful than masculine characters, even when associated with high power verbs in a prompt. Our study raises questions on how one can avoid unintended social biases when using large language models for storytelling.
%R 10.18653/v1/2021.nuse-1.5
%U https://aclanthology.org/2021.nuse-1.5
%U https://doi.org/10.18653/v1/2021.nuse-1.5
%P 48-55

Markdown (Informal)

[Gender and Representation Bias in GPT-3 Generated Stories](https://aclanthology.org/2021.nuse-1.5) (Lucy & Bamman, NUSE-WNU 2021)

ACL

Li Lucy and David Bamman. 2021. Gender and Representation Bias in GPT-3 Generated Stories. In Proceedings of the Third Workshop on Narrative Understanding, pages 48–55, Virtual. Association for Computational Linguistics.