@inproceedings{jain-etal-2021-generating,
  title     = {Generating Gender Augmented Data for {NLP}},
  author    = {Jain, Nishtha and
               Popovi{\'c}, Maja and
               Groves, Declan and
               Vanmassenhove, Eva},
  editor    = {Costa-jussa, Marta and
               Gonen, Hila and
               Hardmeier, Christian and
               Webster, Kellie},
  booktitle = {Proceedings of the 3rd Workshop on Gender Bias in Natural Language Processing},
  month     = aug,
  year      = {2021},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2021.gebnlp-1.11/},
  doi       = {10.18653/v1/2021.gebnlp-1.11},
  pages     = {93--102},
  abstract  = {Gender bias is a frequent occurrence in NLP-based applications, especially pronounced in gender-inflected languages. Bias can appear through associations of certain adjectives and animate nouns with the natural gender of referents, but also due to unbalanced grammatical gender frequencies of inflected words. This type of bias becomes more evident in generating conversational utterances where gender is not specified within the sentence, because most current NLP applications still work on a sentence-level context. As a step towards more inclusive NLP, this paper proposes an automatic and generalisable re-writing approach for short conversational sentences. The rewriting method can be applied to sentences that, without extra-sentential context, have multiple equivalent alternatives in terms of gender. The method can be applied both for creating gender balanced outputs as well as for creating gender balanced training data. The proposed approach is based on a neural machine translation system trained to {\textquoteleft}translate{\textquoteright} from one gender alternative to another. Both the automatic and manual analysis of the approach show promising results with respect to the automatic generation of gender alternatives for conversational sentences in Spanish.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="jain-etal-2021-generating">
<titleInfo>
<title>Generating Gender Augmented Data for NLP</title>
</titleInfo>
<name type="personal">
<namePart type="given">Nishtha</namePart>
<namePart type="family">Jain</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Maja</namePart>
<namePart type="family">Popović</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Declan</namePart>
<namePart type="family">Groves</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Eva</namePart>
<namePart type="family">Vanmassenhove</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 3rd Workshop on Gender Bias in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Marta</namePart>
<namePart type="family">Costa-jussa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hila</namePart>
<namePart type="family">Gonen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Christian</namePart>
<namePart type="family">Hardmeier</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kellie</namePart>
<namePart type="family">Webster</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Gender bias is a frequent occurrence in NLP-based applications, especially pronounced in gender-inflected languages. Bias can appear through associations of certain adjectives and animate nouns with the natural gender of referents, but also due to unbalanced grammatical gender frequencies of inflected words. This type of bias becomes more evident in generating conversational utterances where gender is not specified within the sentence, because most current NLP applications still work on a sentence-level context. As a step towards more inclusive NLP, this paper proposes an automatic and generalisable re-writing approach for short conversational sentences. The rewriting method can be applied to sentences that, without extra-sentential context, have multiple equivalent alternatives in terms of gender. The method can be applied both for creating gender balanced outputs as well as for creating gender balanced training data. The proposed approach is based on a neural machine translation system trained to ‘translate’ from one gender alternative to another. Both the automatic and manual analysis of the approach show promising results with respect to the automatic generation of gender alternatives for conversational sentences in Spanish.</abstract>
<identifier type="citekey">jain-etal-2021-generating</identifier>
<identifier type="doi">10.18653/v1/2021.gebnlp-1.11</identifier>
<location>
<url>https://aclanthology.org/2021.gebnlp-1.11/</url>
</location>
<part>
<date>2021-08</date>
<extent unit="page">
<start>93</start>
<end>102</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Generating Gender Augmented Data for NLP
%A Jain, Nishtha
%A Popović, Maja
%A Groves, Declan
%A Vanmassenhove, Eva
%Y Costa-jussa, Marta
%Y Gonen, Hila
%Y Hardmeier, Christian
%Y Webster, Kellie
%S Proceedings of the 3rd Workshop on Gender Bias in Natural Language Processing
%D 2021
%8 August
%I Association for Computational Linguistics
%C Online
%F jain-etal-2021-generating
%X Gender bias is a frequent occurrence in NLP-based applications, especially pronounced in gender-inflected languages. Bias can appear through associations of certain adjectives and animate nouns with the natural gender of referents, but also due to unbalanced grammatical gender frequencies of inflected words. This type of bias becomes more evident in generating conversational utterances where gender is not specified within the sentence, because most current NLP applications still work on a sentence-level context. As a step towards more inclusive NLP, this paper proposes an automatic and generalisable re-writing approach for short conversational sentences. The rewriting method can be applied to sentences that, without extra-sentential context, have multiple equivalent alternatives in terms of gender. The method can be applied both for creating gender balanced outputs as well as for creating gender balanced training data. The proposed approach is based on a neural machine translation system trained to ‘translate’ from one gender alternative to another. Both the automatic and manual analysis of the approach show promising results with respect to the automatic generation of gender alternatives for conversational sentences in Spanish.
%R 10.18653/v1/2021.gebnlp-1.11
%U https://aclanthology.org/2021.gebnlp-1.11/
%U https://doi.org/10.18653/v1/2021.gebnlp-1.11
%P 93-102
Markdown (Informal)
[Generating Gender Augmented Data for NLP](https://aclanthology.org/2021.gebnlp-1.11/) (Jain et al., GeBNLP 2021)
ACL
- Nishtha Jain, Maja Popović, Declan Groves, and Eva Vanmassenhove. 2021. Generating Gender Augmented Data for NLP. In Proceedings of the 3rd Workshop on Gender Bias in Natural Language Processing, pages 93–102, Online. Association for Computational Linguistics.