BibTeX
@inproceedings{abercrombie-etal-2021-alexa,
title = "{A}lexa, {G}oogle, {S}iri: What are Your Pronouns? Gender and Anthropomorphism in the Design and Perception of Conversational Assistants",
author = "Abercrombie, Gavin and
Cercas Curry, Amanda and
Pandya, Mugdha and
Rieser, Verena",
editor = "Costa-jussa, Marta and
Gonen, Hila and
Hardmeier, Christian and
Webster, Kellie",
booktitle = "Proceedings of the 3rd Workshop on Gender Bias in Natural Language Processing",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.gebnlp-1.4",
doi = "10.18653/v1/2021.gebnlp-1.4",
pages = "24--33",
abstract = "Technology companies have produced varied responses to concerns about the effects of the design of their conversational AI systems. Some have claimed that their voice assistants are in fact not gendered or human-like{---}despite design features suggesting the contrary. We compare these claims to user perceptions by analysing the pronouns they use when referring to AI assistants. We also examine systems{'} responses and the extent to which they generate output which is gendered and anthropomorphic. We find that, while some companies appear to be addressing the ethical concerns raised, in some cases, their claims do not seem to hold true. In particular, our results show that system outputs are ambiguous as to the humanness of the systems, and that users tend to personify and gender them as a result.",
}
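Because every field in this particular entry is a double-quoted string, a plain regular expression is enough to pull the record into a dictionary. The sketch below is minimal and only meant for entries shaped like the one above (the `entry` string is a trimmed copy of the record; a real BibTeX parser would be the robust choice):

```python
import re

# A trimmed copy of the BibTeX record shown above; long field values are
# shortened here only to keep the sketch brief.
entry = r'''@inproceedings{abercrombie-etal-2021-alexa,
    title = "{A}lexa, {G}oogle, {S}iri: What are Your Pronouns?",
    author = "Abercrombie, Gavin and
      Cercas Curry, Amanda and
      Pandya, Mugdha and
      Rieser, Verena",
    year = "2021",
    pages = "24--33",
}'''

# Citation key: the token between the opening brace and the first comma.
citekey = re.search(r"@\w+\{([^,]+),", entry).group(1)

# Top-level field = "value" pairs; DOTALL lets the author list span lines.
fields = dict(re.findall(r'(\w+)\s*=\s*"(.*?)"', entry, flags=re.DOTALL))

# Split the BibTeX author list on the "and" separators.
authors = [a.strip() for a in re.split(r"\s+and\s+", fields["author"])]

print(citekey)            # abercrombie-etal-2021-alexa
print(fields["pages"])    # 24--33
print(authors)            # ['Abercrombie, Gavin', 'Cercas Curry, Amanda', ...]
```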
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="abercrombie-etal-2021-alexa">
<titleInfo>
<title>Alexa, Google, Siri: What are Your Pronouns? Gender and Anthropomorphism in the Design and Perception of Conversational Assistants</title>
</titleInfo>
<name type="personal">
<namePart type="given">Gavin</namePart>
<namePart type="family">Abercrombie</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Amanda</namePart>
<namePart type="family">Cercas Curry</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mugdha</namePart>
<namePart type="family">Pandya</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Verena</namePart>
<namePart type="family">Rieser</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 3rd Workshop on Gender Bias in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Marta</namePart>
<namePart type="family">Costa-jussa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hila</namePart>
<namePart type="family">Gonen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Christian</namePart>
<namePart type="family">Hardmeier</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kellie</namePart>
<namePart type="family">Webster</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Technology companies have produced varied responses to concerns about the effects of the design of their conversational AI systems. Some have claimed that their voice assistants are in fact not gendered or human-like—despite design features suggesting the contrary. We compare these claims to user perceptions by analysing the pronouns they use when referring to AI assistants. We also examine systems’ responses and the extent to which they generate output which is gendered and anthropomorphic. We find that, while some companies appear to be addressing the ethical concerns raised, in some cases, their claims do not seem to hold true. In particular, our results show that system outputs are ambiguous as to the humanness of the systems, and that users tend to personify and gender them as a result.</abstract>
<identifier type="citekey">abercrombie-etal-2021-alexa</identifier>
<identifier type="doi">10.18653/v1/2021.gebnlp-1.4</identifier>
<location>
<url>https://aclanthology.org/2021.gebnlp-1.4</url>
</location>
<part>
<date>2021-08</date>
<extent unit="page">
<start>24</start>
<end>33</end>
</extent>
</part>
</mods>
</modsCollection>
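The MODS record is namespace-qualified XML, so reading it with the standard library means supplying the `http://www.loc.gov/mods/v3` namespace on every lookup. A minimal sketch using a trimmed copy of the record above (one author kept for brevity; the `xml_record` variable is illustrative):

```python
import xml.etree.ElementTree as ET

# A trimmed copy of the MODS record shown above.
xml_record = """<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="abercrombie-etal-2021-alexa">
  <titleInfo><title>Alexa, Google, Siri: What are Your Pronouns?</title></titleInfo>
  <name type="personal">
    <namePart type="given">Gavin</namePart>
    <namePart type="family">Abercrombie</namePart>
    <role><roleTerm authority="marcrelator" type="text">author</roleTerm></role>
  </name>
  <identifier type="doi">10.18653/v1/2021.gebnlp-1.4</identifier>
</mods>
</modsCollection>"""

NS = {"m": "http://www.loc.gov/mods/v3"}   # default namespace of the record

root = ET.fromstring(xml_record)           # <modsCollection>
mods = root.find("m:mods", NS)

title = mods.findtext("m:titleInfo/m:title", namespaces=NS)
doi = mods.findtext("m:identifier[@type='doi']", namespaces=NS)

# Personal names whose role term is "author"; in the full record the editors
# sit under <relatedItem>, so this non-recursive findall skips them anyway.
authors = [
    "{} {}".format(
        name.findtext("m:namePart[@type='given']", namespaces=NS),
        name.findtext("m:namePart[@type='family']", namespaces=NS),
    )
    for name in mods.findall("m:name[@type='personal']", NS)
    if name.findtext("m:role/m:roleTerm", namespaces=NS) == "author"
]

print(title)     # Alexa, Google, Siri: What are Your Pronouns?
print(authors)   # ['Gavin Abercrombie']
print(doi)       # 10.18653/v1/2021.gebnlp-1.4
```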
Endnote
%0 Conference Proceedings
%T Alexa, Google, Siri: What are Your Pronouns? Gender and Anthropomorphism in the Design and Perception of Conversational Assistants
%A Abercrombie, Gavin
%A Cercas Curry, Amanda
%A Pandya, Mugdha
%A Rieser, Verena
%Y Costa-jussa, Marta
%Y Gonen, Hila
%Y Hardmeier, Christian
%Y Webster, Kellie
%S Proceedings of the 3rd Workshop on Gender Bias in Natural Language Processing
%D 2021
%8 August
%I Association for Computational Linguistics
%C Online
%F abercrombie-etal-2021-alexa
%X Technology companies have produced varied responses to concerns about the effects of the design of their conversational AI systems. Some have claimed that their voice assistants are in fact not gendered or human-like—despite design features suggesting the contrary. We compare these claims to user perceptions by analysing the pronouns they use when referring to AI assistants. We also examine systems’ responses and the extent to which they generate output which is gendered and anthropomorphic. We find that, while some companies appear to be addressing the ethical concerns raised, in some cases, their claims do not seem to hold true. In particular, our results show that system outputs are ambiguous as to the humanness of the systems, and that users tend to personify and gender them as a result.
%R 10.18653/v1/2021.gebnlp-1.4
%U https://aclanthology.org/2021.gebnlp-1.4
%U https://doi.org/10.18653/v1/2021.gebnlp-1.4
%P 24-33
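The Endnote export is a refer-style record: each line is a short tag followed by a value, and tags such as %A, %Y and %U may repeat. A minimal sketch of collecting it into a dictionary of lists and assembling a human-readable reference (the `record` string abbreviates the block above):

```python
from collections import defaultdict

# A trimmed copy of the refer/Endnote record shown above.
record = """%0 Conference Proceedings
%T Alexa, Google, Siri: What are Your Pronouns?
%A Abercrombie, Gavin
%A Cercas Curry, Amanda
%A Pandya, Mugdha
%A Rieser, Verena
%S Proceedings of the 3rd Workshop on Gender Bias in Natural Language Processing
%D 2021
%I Association for Computational Linguistics
%C Online
%P 24-33"""

# Group values by tag; repeatable tags (%A, %Y, %U) naturally become lists.
fields = defaultdict(list)
for line in record.splitlines():
    tag, _, value = line.partition(" ")
    fields[tag].append(value)

authors = " and ".join(fields["%A"])
print(f"{authors}. {fields['%D'][0]}. {fields['%T'][0]}. "
      f"In {fields['%S'][0]}, pages {fields['%P'][0]}, {fields['%C'][0]}. "
      f"{fields['%I'][0]}.")
```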
Markdown (Informal)
[Alexa, Google, Siri: What are Your Pronouns? Gender and Anthropomorphism in the Design and Perception of Conversational Assistants](https://aclanthology.org/2021.gebnlp-1.4) (Abercrombie et al., GeBNLP 2021)
ACL
Gavin Abercrombie, Amanda Cercas Curry, Mugdha Pandya, and Verena Rieser. 2021. Alexa, Google, Siri: What are Your Pronouns? Gender and Anthropomorphism in the Design and Perception of Conversational Assistants. In Proceedings of the 3rd Workshop on Gender Bias in Natural Language Processing, pages 24–33, Online. Association for Computational Linguistics.