@inproceedings{habernal-2021-differential,
title = "When differential privacy meets {NLP}: The devil is in the detail",
author = "Habernal, Ivan",
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2021",
address = "Online and Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.emnlp-main.114",
doi = "10.18653/v1/2021.emnlp-main.114",
pages = "1522--1528",
abstract = "Differential privacy provides a formal approach to privacy of individuals. Applications of differential privacy in various scenarios, such as protecting users{'} original utterances, must satisfy certain mathematical properties. Our contribution is a formal analysis of ADePT, a differentially private auto-encoder for text rewriting (Krishna et al, 2021). ADePT achieves promising results on downstream tasks while providing tight privacy guarantees. Our proof reveals that ADePT is not differentially private, thus rendering the experimental results unsubstantiated. We also quantify the impact of the error in its private mechanism, showing that the true sensitivity is higher by at least factor 6 in an optimistic case of a very small encoder{'}s dimension and that the amount of utterances that are not privatized could easily reach 100{\%} of the entire dataset. Our intention is neither to criticize the authors, nor the peer-reviewing process, but rather point out that if differential privacy applications in NLP rely on formal guarantees, these should be outlined in full and put under detailed scrutiny.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="habernal-2021-differential">
<titleInfo>
<title>When differential privacy meets NLP: The devil is in the detail</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ivan</namePart>
<namePart type="family">Habernal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online and Punta Cana, Dominican Republic</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Differential privacy provides a formal approach to the privacy of individuals. Applications of differential privacy in various scenarios, such as protecting users’ original utterances, must satisfy certain mathematical properties. Our contribution is a formal analysis of ADePT, a differentially private auto-encoder for text rewriting (Krishna et al., 2021). ADePT achieves promising results on downstream tasks while providing tight privacy guarantees. Our proof reveals that ADePT is not differentially private, thus rendering the experimental results unsubstantiated. We also quantify the impact of the error in its private mechanism, showing that the true sensitivity is higher by at least a factor of 6 even in the optimistic case of a very small encoder dimension, and that the number of utterances that are not privatized could easily reach 100% of the entire dataset. Our intention is neither to criticize the authors nor the peer-reviewing process, but rather to point out that if differential privacy applications in NLP rely on formal guarantees, these should be outlined in full and put under detailed scrutiny.</abstract>
<identifier type="citekey">habernal-2021-differential</identifier>
<identifier type="doi">10.18653/v1/2021.emnlp-main.114</identifier>
<location>
<url>https://aclanthology.org/2021.emnlp-main.114</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>1522</start>
<end>1528</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T When differential privacy meets NLP: The devil is in the detail
%A Habernal, Ivan
%S Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing
%D 2021
%8 November
%I Association for Computational Linguistics
%C Online and Punta Cana, Dominican Republic
%F habernal-2021-differential
%X Differential privacy provides a formal approach to the privacy of individuals. Applications of differential privacy in various scenarios, such as protecting users’ original utterances, must satisfy certain mathematical properties. Our contribution is a formal analysis of ADePT, a differentially private auto-encoder for text rewriting (Krishna et al., 2021). ADePT achieves promising results on downstream tasks while providing tight privacy guarantees. Our proof reveals that ADePT is not differentially private, thus rendering the experimental results unsubstantiated. We also quantify the impact of the error in its private mechanism, showing that the true sensitivity is higher by at least a factor of 6 even in the optimistic case of a very small encoder dimension, and that the number of utterances that are not privatized could easily reach 100% of the entire dataset. Our intention is neither to criticize the authors nor the peer-reviewing process, but rather to point out that if differential privacy applications in NLP rely on formal guarantees, these should be outlined in full and put under detailed scrutiny.
%R 10.18653/v1/2021.emnlp-main.114
%U https://aclanthology.org/2021.emnlp-main.114
%U https://doi.org/10.18653/v1/2021.emnlp-main.114
%P 1522-1528
Markdown (Informal)
[When differential privacy meets NLP: The devil is in the detail](https://aclanthology.org/2021.emnlp-main.114) (Habernal, EMNLP 2021)
ACL
Ivan Habernal. 2021. When differential privacy meets NLP: The devil is in the detail. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 1522–1528, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
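
Not part of the citation record above: a minimal, self-contained Python sketch of the L1-versus-L2 sensitivity gap the abstract alludes to. The clipping bound C, the encoder dimension n, and the framing that noise would be calibrated as if the sensitivity were 2C are illustrative assumptions, not details taken from this record; the sketch only demonstrates the general fact that an L2-norm clip of C still permits an L1 gap of 2C*sqrt(n), which the Laplace mechanism's calibration actually requires.

    import numpy as np

    # Illustrative values (assumptions, not taken from the paper):
    C = 1.0   # hypothetical L2 clipping bound on the encoder output
    n = 32    # hypothetical (small) encoder dimension

    # Two vectors, each with L2 norm exactly C, chosen to maximize
    # their L1 distance: equal-magnitude coordinates, opposite signs.
    x = np.full(n, C / np.sqrt(n))
    y = -x

    naive_l1_sensitivity = 2 * C                 # what an L2-based argument suggests
    actual_l1_gap = np.abs(x - y).sum()          # equals 2 * C * sqrt(n)

    print(f"naive: {naive_l1_sensitivity:.2f}, "
          f"actual: {actual_l1_gap:.2f}, "
          f"factor: {actual_l1_gap / naive_l1_sensitivity:.2f}")
    # With n = 32 the factor is ~5.66, i.e. the same order as the
    # abstract's "at least a factor of 6" for a very small dimension.

Since the Laplace mechanism's noise scale must be proportional to the L1 sensitivity, underestimating it by a factor of sqrt(n) means the added noise is too small by that same factor, so the claimed privacy guarantee does not hold.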