@inproceedings{dev-etal-2023-building,
title = "Building Stereotype Repositories with Complementary Approaches for Scale and Depth",
author = "Dev, Sunipa and
Jha, Akshita and
Goyal, Jaya and
Tewari, Dinesh and
Dave, Shachi and
Prabhakaran, Vinodkumar",
editor = "Dev, Sunipa and
Prabhakaran, Vinodkumar and
Adelani, David and
Hovy, Dirk and
Benotti, Luciana",
booktitle = "Proceedings of the First Workshop on Cross-Cultural Considerations in {NLP} ({C3NLP})",
month = may,
year = "2023",
address = "Dubrovnik, Croatia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.c3nlp-1.9",
doi = "10.18653/v1/2023.c3nlp-1.9",
pages = "84--90",
abstract = "Measurements of fairness in NLP have been critiqued for lacking concrete definitions of biases or harms measured, and for perpetuating a singular, Western narrative of fairness globally. To combat some of these pivotal issues, methods for curating datasets and benchmarks that target specific harms are rapidly emerging. However, these methods still face the significant challenge of achieving coverage over global cultures and perspectives at scale. To address this, in this paper, we highlight the utility and importance of complementary approaches that leverage both community engagement as well as large generative models, in these curation strategies. We specifically target the harm of stereotyping and demonstrate a pathway to build a benchmark that covers stereotypes about diverse, and intersectional identities. We discuss the two approaches, their advantages and constraints, the characteristics of the data they produce, and finally, their potential to be used complementarily for better evaluation of stereotyping harms.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="dev-etal-2023-building">
<titleInfo>
<title>Building Stereotype Repositories with Complementary Approaches for Scale and Depth</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sunipa</namePart>
<namePart type="family">Dev</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Akshita</namePart>
<namePart type="family">Jha</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jaya</namePart>
<namePart type="family">Goyal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dinesh</namePart>
<namePart type="family">Tewari</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shachi</namePart>
<namePart type="family">Dave</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vinodkumar</namePart>
<namePart type="family">Prabhakaran</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First Workshop on Cross-Cultural Considerations in NLP (C3NLP)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sunipa</namePart>
<namePart type="family">Dev</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vinodkumar</namePart>
<namePart type="family">Prabhakaran</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">David</namePart>
<namePart type="family">Adelani</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dirk</namePart>
<namePart type="family">Hovy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Luciana</namePart>
<namePart type="family">Benotti</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dubrovnik, Croatia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Measurements of fairness in NLP have been critiqued for lacking concrete definitions of biases or harms measured, and for perpetuating a singular, Western narrative of fairness globally. To combat some of these pivotal issues, methods for curating datasets and benchmarks that target specific harms are rapidly emerging. However, these methods still face the significant challenge of achieving coverage over global cultures and perspectives at scale. To address this, in this paper, we highlight the utility and importance of complementary approaches that leverage both community engagement as well as large generative models, in these curation strategies. We specifically target the harm of stereotyping and demonstrate a pathway to build a benchmark that covers stereotypes about diverse, and intersectional identities. We discuss the two approaches, their advantages and constraints, the characteristics of the data they produce, and finally, their potential to be used complementarily for better evaluation of stereotyping harms.</abstract>
<identifier type="citekey">dev-etal-2023-building</identifier>
<identifier type="doi">10.18653/v1/2023.c3nlp-1.9</identifier>
<location>
<url>https://aclanthology.org/2023.c3nlp-1.9</url>
</location>
<part>
<date>2023-05</date>
<extent unit="page">
<start>84</start>
<end>90</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Building Stereotype Repositories with Complementary Approaches for Scale and Depth
%A Dev, Sunipa
%A Jha, Akshita
%A Goyal, Jaya
%A Tewari, Dinesh
%A Dave, Shachi
%A Prabhakaran, Vinodkumar
%Y Dev, Sunipa
%Y Prabhakaran, Vinodkumar
%Y Adelani, David
%Y Hovy, Dirk
%Y Benotti, Luciana
%S Proceedings of the First Workshop on Cross-Cultural Considerations in NLP (C3NLP)
%D 2023
%8 May
%I Association for Computational Linguistics
%C Dubrovnik, Croatia
%F dev-etal-2023-building
%X Measurements of fairness in NLP have been critiqued for lacking concrete definitions of biases or harms measured, and for perpetuating a singular, Western narrative of fairness globally. To combat some of these pivotal issues, methods for curating datasets and benchmarks that target specific harms are rapidly emerging. However, these methods still face the significant challenge of achieving coverage over global cultures and perspectives at scale. To address this, in this paper, we highlight the utility and importance of complementary approaches that leverage both community engagement as well as large generative models, in these curation strategies. We specifically target the harm of stereotyping and demonstrate a pathway to build a benchmark that covers stereotypes about diverse, and intersectional identities. We discuss the two approaches, their advantages and constraints, the characteristics of the data they produce, and finally, their potential to be used complementarily for better evaluation of stereotyping harms.
%R 10.18653/v1/2023.c3nlp-1.9
%U https://aclanthology.org/2023.c3nlp-1.9
%U https://doi.org/10.18653/v1/2023.c3nlp-1.9
%P 84-90
Markdown (Informal)
[Building Stereotype Repositories with Complementary Approaches for Scale and Depth](https://aclanthology.org/2023.c3nlp-1.9) (Dev et al., C3NLP 2023)
ACL