@inproceedings{kerrigan-etal-2020-differentially,
  title     = {Differentially Private Language Models Benefit from Public Pre-training},
  author    = {Kerrigan, Gavin and
               Slack, Dylan and
               Tuyls, Jens},
  editor    = {Feyisetan, Oluwaseyi and
               Ghanavati, Sepideh and
               Malmasi, Shervin and
               Thaine, Patricia},
  booktitle = {Proceedings of the Second Workshop on Privacy in {NLP}},
  month     = nov,
  year      = {2020},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2020.privatenlp-1.5},
  doi       = {10.18653/v1/2020.privatenlp-1.5},
  pages     = {39--45},
  abstract  = {Language modeling is a keystone task in natural language processing. When training a language model on sensitive information, differential privacy (DP) allows us to quantify the degree to which our private data is protected. However, training algorithms which enforce differential privacy often lead to degradation in model quality. We study the feasibility of learning a language model which is simultaneously high-quality and privacy preserving by tuning a public base model on a private corpus. We find that DP fine-tuning boosts the performance of language models in the private domain, making the training of such models possible.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kerrigan-etal-2020-differentially">
<titleInfo>
<title>Differentially Private Language Models Benefit from Public Pre-training</title>
</titleInfo>
<name type="personal">
<namePart type="given">Gavin</namePart>
<namePart type="family">Kerrigan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dylan</namePart>
<namePart type="family">Slack</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jens</namePart>
<namePart type="family">Tuyls</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Second Workshop on Privacy in NLP</title>
</titleInfo>
<name type="personal">
<namePart type="given">Oluwaseyi</namePart>
<namePart type="family">Feyisetan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sepideh</namePart>
<namePart type="family">Ghanavati</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shervin</namePart>
<namePart type="family">Malmasi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Patricia</namePart>
<namePart type="family">Thaine</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Language modeling is a keystone task in natural language processing. When training a language model on sensitive information, differential privacy (DP) allows us to quantify the degree to which our private data is protected. However, training algorithms which enforce differential privacy often lead to degradation in model quality. We study the feasibility of learning a language model which is simultaneously high-quality and privacy preserving by tuning a public base model on a private corpus. We find that DP fine-tuning boosts the performance of language models in the private domain, making the training of such models possible.</abstract>
<identifier type="citekey">kerrigan-etal-2020-differentially</identifier>
<identifier type="doi">10.18653/v1/2020.privatenlp-1.5</identifier>
<location>
<url>https://aclanthology.org/2020.privatenlp-1.5</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>39</start>
<end>45</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Differentially Private Language Models Benefit from Public Pre-training
%A Kerrigan, Gavin
%A Slack, Dylan
%A Tuyls, Jens
%Y Feyisetan, Oluwaseyi
%Y Ghanavati, Sepideh
%Y Malmasi, Shervin
%Y Thaine, Patricia
%S Proceedings of the Second Workshop on Privacy in NLP
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F kerrigan-etal-2020-differentially
%X Language modeling is a keystone task in natural language processing. When training a language model on sensitive information, differential privacy (DP) allows us to quantify the degree to which our private data is protected. However, training algorithms which enforce differential privacy often lead to degradation in model quality. We study the feasibility of learning a language model which is simultaneously high-quality and privacy preserving by tuning a public base model on a private corpus. We find that DP fine-tuning boosts the performance of language models in the private domain, making the training of such models possible.
%R 10.18653/v1/2020.privatenlp-1.5
%U https://aclanthology.org/2020.privatenlp-1.5
%U https://doi.org/10.18653/v1/2020.privatenlp-1.5
%P 39-45
Markdown (Informal)
[Differentially Private Language Models Benefit from Public Pre-training](https://aclanthology.org/2020.privatenlp-1.5) (Kerrigan et al., PrivateNLP 2020)
ACL