@inproceedings{elmahdy-etal-2022-privacy,
    title = "Privacy Leakage in Text Classification: A Data Extraction Approach",
    author = "Elmahdy, Adel and
      Inan, Huseyin A. and
      Sim, Robert",
    editor = "Feyisetan, Oluwaseyi and
      Ghanavati, Sepideh and
      Thaine, Patricia and
      Habernal, Ivan and
      Mireshghallah, Fatemehsadat",
    booktitle = "Proceedings of the Fourth Workshop on Privacy in Natural Language Processing",
    month = jul,
    year = "2022",
    address = "Seattle, United States",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.privatenlp-1.3",
    doi = "10.18653/v1/2022.privatenlp-1.3",
    pages = "13--20",
    abstract = "Recent work has demonstrated the successful extraction of training data from generative language models. However, it is not evident whether such extraction is feasible in text classification models since the training objective is to predict the class label as opposed to next-word prediction. This poses an interesting challenge and raises an important question regarding the privacy of training data in text classification settings. Therefore, we study the potential privacy leakage in the text classification domain by investigating the problem of unintended memorization of training data that is not pertinent to the learning task. We propose an algorithm to extract missing tokens of a partial text by exploiting the likelihood of the class label provided by the model. We test the effectiveness of our algorithm by inserting canaries into the training set and attempting to extract tokens in these canaries post-training. In our experiments, we demonstrate that successful extraction is possible to some extent. This can also be used as an auditing strategy to assess any potential unauthorized use of personal data without consent.",
}
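
To make the abstract's extraction idea concrete, here is a minimal sketch (not the authors' exact algorithm) of how one might fill a missing token in a partial canary by ranking candidate tokens according to the classifier's likelihood for the text's known class label. The checkpoint name, the `[MISSING]` placeholder convention, and the candidate pool are illustrative assumptions, not details from the paper.

```python
# Hedged sketch of label-likelihood-based token extraction, assuming a
# HuggingFace-style fine-tuned text classifier. Greedily fills one missing
# slot by picking the candidate that maximizes P(label | filled-in text).
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

MODEL_NAME = "my-finetuned-classifier"  # hypothetical fine-tuned checkpoint
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
model.eval()

def extract_missing_token(partial_text, label_id, candidates):
    """Return the candidate (and its log-likelihood) that maximizes the
    model's probability of the known class label for the completed text."""
    best_token, best_score = None, float("-inf")
    for token in candidates:
        filled = partial_text.replace("[MISSING]", token)
        inputs = tokenizer(filled, return_tensors="pt", truncation=True)
        with torch.no_grad():
            logits = model(**inputs).logits
        # Log-probability of the canary's known label under the classifier.
        score = torch.log_softmax(logits, dim=-1)[0, label_id].item()
        if score > best_score:
            best_token, best_score = token, score
    return best_token, best_score

# Usage: the canary's fixed context is known; only the secret token varies.
guess, logp = extract_missing_token(
    "my account pin is [MISSING]",          # illustrative canary template
    label_id=1,                              # assumed label of the canary
    candidates=[str(n) for n in range(1000)] # assumed candidate pool
)
```

Iterating this greedy step over several masked positions, or batching the candidate scoring, would be natural extensions; the core signal in both cases is the same label likelihood the abstract describes.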