@inproceedings{petren-bach-hansen-etal-2022-impact,
title = "The Impact of Differential Privacy on Group Disparity Mitigation",
author = "Petren Bach Hansen, Victor and
Tejaswi Neerkaje, Atula and
Sawhney, Ramit and
Flek, Lucie and
S{\o}gaard, Anders",
editor = "Feyisetan, Oluwaseyi and
Ghanavati, Sepideh and
Thaine, Patricia and
Habernal, Ivan and
Mireshghallah, Fatemehsadat",
booktitle = "Proceedings of the Fourth Workshop on Privacy in Natural Language Processing",
month = jul,
year = "2022",
address = "Seattle, United States",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.privatenlp-1.2",
doi = "10.18653/v1/2022.privatenlp-1.2",
pages = "12--12",
abstract = "The performance cost of differential privacy has, for some applications, been shown to be higher for minority groups fairness, conversely, has been shown to disproportionally compromise the privacy of members of such groups. Most work in this area has been restricted to computer vision and risk assessment. In this paper, we evaluate the impact of differential privacy on fairness across four tasks, focusing on how attempts to mitigate privacy violations and between-group performance differences interact Does privacy inhibit attempts to ensure fairness? To this end, we train epsilon, delta-differentially private models with empirical risk minimization and group distributionally robust training objectives. Consistent with previous findings, we find that differential privacy increases between-group performance differences in the baseline setting but more interestingly, differential privacy reduces between-group performance differences in the robust setting. We explain this by reinterpreting differential privacy as regularization.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="petren-bach-hansen-etal-2022-impact">
<titleInfo>
<title>The Impact of Differential Privacy on Group Disparity Mitigation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Victor</namePart>
<namePart type="family">Petren Bach Hansen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Atula</namePart>
<namePart type="family">Tejaswi Neerkaje</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ramit</namePart>
<namePart type="family">Sawhney</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lucie</namePart>
<namePart type="family">Flek</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anders</namePart>
<namePart type="family">Sogaard</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Fourth Workshop on Privacy in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Oluwaseyi</namePart>
<namePart type="family">Feyisetan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sepideh</namePart>
<namePart type="family">Ghanavati</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Patricia</namePart>
<namePart type="family">Thaine</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ivan</namePart>
<namePart type="family">Habernal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Fatemehsadat</namePart>
<namePart type="family">Mireshghallah</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Seattle, United States</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The performance cost of differential privacy has, for some applications, been shown to be higher for minority groups; fairness, conversely, has been shown to disproportionally compromise the privacy of members of such groups. Most work in this area has been restricted to computer vision and risk assessment. In this paper, we evaluate the impact of differential privacy on fairness across four tasks, focusing on how attempts to mitigate privacy violations and between-group performance differences interact: Does privacy inhibit attempts to ensure fairness? To this end, we train (epsilon, delta)-differentially private models with empirical risk minimization and group distributionally robust training objectives. Consistent with previous findings, we find that differential privacy increases between-group performance differences in the baseline setting; but more interestingly, differential privacy reduces between-group performance differences in the robust setting. We explain this by reinterpreting differential privacy as regularization.</abstract>
<identifier type="citekey">petren-bach-hansen-etal-2022-impact</identifier>
<identifier type="doi">10.18653/v1/2022.privatenlp-1.2</identifier>
<location>
<url>https://aclanthology.org/2022.privatenlp-1.2</url>
</location>
<part>
<date>2022-07</date>
<extent unit="page">
<start>12</start>
<end>12</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T The Impact of Differential Privacy on Group Disparity Mitigation
%A Petrén Bach Hansen, Victor
%A Tejaswi Neerkaje, Atula
%A Sawhney, Ramit
%A Flek, Lucie
%A Søgaard, Anders
%Y Feyisetan, Oluwaseyi
%Y Ghanavati, Sepideh
%Y Thaine, Patricia
%Y Habernal, Ivan
%Y Mireshghallah, Fatemehsadat
%S Proceedings of the Fourth Workshop on Privacy in Natural Language Processing
%D 2022
%8 July
%I Association for Computational Linguistics
%C Seattle, United States
%F petren-bach-hansen-etal-2022-impact
%X The performance cost of differential privacy has, for some applications, been shown to be higher for minority groups; fairness, conversely, has been shown to disproportionally compromise the privacy of members of such groups. Most work in this area has been restricted to computer vision and risk assessment. In this paper, we evaluate the impact of differential privacy on fairness across four tasks, focusing on how attempts to mitigate privacy violations and between-group performance differences interact: Does privacy inhibit attempts to ensure fairness? To this end, we train (epsilon, delta)-differentially private models with empirical risk minimization and group distributionally robust training objectives. Consistent with previous findings, we find that differential privacy increases between-group performance differences in the baseline setting; but more interestingly, differential privacy reduces between-group performance differences in the robust setting. We explain this by reinterpreting differential privacy as regularization.
%R 10.18653/v1/2022.privatenlp-1.2
%U https://aclanthology.org/2022.privatenlp-1.2
%U https://doi.org/10.18653/v1/2022.privatenlp-1.2
%P 12-12
Markdown (Informal)
[The Impact of Differential Privacy on Group Disparity Mitigation](https://aclanthology.org/2022.privatenlp-1.2) (Petrén Bach Hansen et al., PrivateNLP 2022)
ACL
Victor Petrén Bach Hansen, Atula Tejaswi Neerkaje, Ramit Sawhney, Lucie Flek, and Anders Søgaard. 2022. The Impact of Differential Privacy on Group Disparity Mitigation. In Proceedings of the Fourth Workshop on Privacy in Natural Language Processing, pages 12–12, Seattle, United States. Association for Computational Linguistics.
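
The training setup named in the abstract combines two standard ingredients: a group distributionally robust (group-DRO) objective, which upweights whichever group currently has the higher loss, and DP-SGD-style privatization, which clips each example's gradient and adds Gaussian noise before the parameter update. Below is a minimal illustrative sketch of that combination on synthetic data. It is not the authors' code; the data, hyperparameters, and names are assumptions, and because the DRO reweighting changes the gradient's sensitivity, the sketch makes no formal (epsilon, delta) accounting claim.

```python
# Illustrative sketch only: group-DRO training with DP-SGD-style gradient
# privatization (per-example clipping + Gaussian noise). Synthetic data and
# all hyperparameters are assumptions, not values from the paper.
import numpy as np

rng = np.random.default_rng(0)

# Synthetic binary-classification data with a majority and a minority group.
n, d = 1000, 10
X = rng.normal(size=(n, d))
group = (rng.random(n) < 0.2).astype(int)            # ~20% minority group
true_w = rng.normal(size=d)
y = (X @ true_w + 0.5 * group * rng.normal(size=n) > 0).astype(float)

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

w = np.zeros(d)
q = np.array([0.5, 0.5])       # group-DRO weights over the two groups
eta_q, lr = 0.1, 0.1           # DRO step size and model learning rate
clip_norm, sigma = 1.0, 0.5    # DP-SGD clipping bound C and noise multiplier

for step in range(200):
    idx = rng.choice(n, size=64, replace=False)
    Xb, yb, gb = X[idx], y[idx], group[idx]
    p = sigmoid(Xb @ w)

    # Group-DRO: exponentiated-gradient update that upweights the group
    # with the larger current loss, then renormalize.
    losses = -(yb * np.log(p + 1e-12) + (1 - yb) * np.log(1 - p + 1e-12))
    for g_id in (0, 1):
        if (gb == g_id).any():
            q[g_id] *= np.exp(eta_q * losses[gb == g_id].mean())
    q /= q.sum()

    # DP-SGD style: clip each example's gradient to norm C, combine with the
    # DRO weights, and add Gaussian noise before the update.
    per_ex_grads = (p - yb)[:, None] * Xb                # log-loss gradient per example
    norms = np.linalg.norm(per_ex_grads, axis=1, keepdims=True)
    per_ex_grads *= np.minimum(1.0, clip_norm / (norms + 1e-12))
    weights = q[gb] / q[gb].sum()                        # per-example DRO reweighting
    noisy_grad = (per_ex_grads * weights[:, None]).sum(axis=0) \
        + rng.normal(scale=sigma * clip_norm / len(idx), size=d)
    w -= lr * noisy_grad

# Per-group accuracy exposes the between-group performance difference.
for g_id in (0, 1):
    acc = ((sigmoid(X[group == g_id] @ w) > 0.5) == y[group == g_id]).mean()
    print(f"group {g_id} accuracy: {acc:.3f}")
```

Rerunning the sketch with sigma set to 0 (no noise) or with q frozen at uniform weights (plain empirical risk minimization) gives a small-scale analogue of the paper's four-way comparison between private/non-private and baseline/robust training.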