@inproceedings{isonuma-titov-2024-unlearning,
title = "Unlearning Traces the Influential Training Data of Language Models",
author = "Isonuma, Masaru and
Titov, Ivan",
editor = "Ku, Lun-Wei and
Martins, Andre and
Srikumar, Vivek",
booktitle = "Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.luhme-long.343/",
doi = "10.18653/v1/2024.acl-long.343",
pages = "6312--6325",
abstract = "Identifying the training datasets that influence a language model`s outputs is essential for minimizing the generation of harmful content and enhancing its performance. Ideally, we can measure the influence of each dataset by removing it from training; however, it is prohibitively expensive to retrain a model multiple times. This paper presents UnTrac: unlearning traces the influence of a training dataset on the model`s performance. UnTrac is extremely simple; each training dataset is unlearned by gradient ascent, and we evaluate how much the model`s predictions change after unlearning. Furthermore, we propose a more scalable approach, UnTrac-Inv, which unlearns a test dataset and evaluates the unlearned model on training datasets. UnTrac-Inv resembles UnTrac, while being efficient for massive training datasets. In the experiments, we examine if our methods can assess the influence of pretraining datasets on generating toxic, biased, and untruthful content. Our methods estimate their influence much more accurately than existing methods while requiring neither excessive memory space nor multiple checkpoints."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="isonuma-titov-2024-unlearning">
    <titleInfo>
      <title>Unlearning Traces the Influential Training Data of Language Models</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Masaru</namePart>
      <namePart type="family">Isonuma</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ivan</namePart>
      <namePart type="family">Titov</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-08</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Lun-Wei</namePart>
        <namePart type="family">Ku</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Andre</namePart>
        <namePart type="family">Martins</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Vivek</namePart>
        <namePart type="family">Srikumar</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Bangkok, Thailand</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Identifying the training datasets that influence a language model's outputs is essential for minimizing the generation of harmful content and enhancing its performance. Ideally, we can measure the influence of each dataset by removing it from training; however, it is prohibitively expensive to retrain a model multiple times. This paper presents UnTrac: unlearning traces the influence of a training dataset on the model's performance. UnTrac is extremely simple; each training dataset is unlearned by gradient ascent, and we evaluate how much the model's predictions change after unlearning. Furthermore, we propose a more scalable approach, UnTrac-Inv, which unlearns a test dataset and evaluates the unlearned model on training datasets. UnTrac-Inv resembles UnTrac, while being efficient for massive training datasets. In the experiments, we examine if our methods can assess the influence of pretraining datasets on generating toxic, biased, and untruthful content. Our methods estimate their influence much more accurately than existing methods while requiring neither excessive memory space nor multiple checkpoints.</abstract>
    <identifier type="citekey">isonuma-titov-2024-unlearning</identifier>
    <identifier type="doi">10.18653/v1/2024.acl-long.343</identifier>
    <location>
      <url>https://aclanthology.org/2024.luhme-long.343/</url>
    </location>
    <part>
      <date>2024-08</date>
      <extent unit="page">
        <start>6312</start>
        <end>6325</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Unlearning Traces the Influential Training Data of Language Models
%A Isonuma, Masaru
%A Titov, Ivan
%Y Ku, Lun-Wei
%Y Martins, Andre
%Y Srikumar, Vivek
%S Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)
%D 2024
%8 August
%I Association for Computational Linguistics
%C Bangkok, Thailand
%F isonuma-titov-2024-unlearning
%X Identifying the training datasets that influence a language model's outputs is essential for minimizing the generation of harmful content and enhancing its performance. Ideally, we can measure the influence of each dataset by removing it from training; however, it is prohibitively expensive to retrain a model multiple times. This paper presents UnTrac: unlearning traces the influence of a training dataset on the model's performance. UnTrac is extremely simple; each training dataset is unlearned by gradient ascent, and we evaluate how much the model's predictions change after unlearning. Furthermore, we propose a more scalable approach, UnTrac-Inv, which unlearns a test dataset and evaluates the unlearned model on training datasets. UnTrac-Inv resembles UnTrac, while being efficient for massive training datasets. In the experiments, we examine if our methods can assess the influence of pretraining datasets on generating toxic, biased, and untruthful content. Our methods estimate their influence much more accurately than existing methods while requiring neither excessive memory space nor multiple checkpoints.
%R 10.18653/v1/2024.acl-long.343
%U https://aclanthology.org/2024.luhme-long.343/
%U https://doi.org/10.18653/v1/2024.acl-long.343
%P 6312-6325
Markdown (Informal)
[Unlearning Traces the Influential Training Data of Language Models](https://aclanthology.org/2024.luhme-long.343/) (Isonuma & Titov, ACL 2024)
ACL
Masaru Isonuma and Ivan Titov. 2024. Unlearning Traces the Influential Training Data of Language Models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6312–6325, Bangkok, Thailand. Association for Computational Linguistics.
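
The abstract above describes UnTrac procedurally: unlearn one training dataset by gradient ascent, then measure how much the model's predictions change. The sketch below illustrates that idea only; it is not the authors' released code. The Hugging Face-style causal LM interface, the SGD optimizer, the step count, the learning rate, and the use of test-loss increase as the influence score are all illustrative assumptions.

```python
# Minimal sketch of the UnTrac idea from the abstract: unlearn a training
# dataset via gradient ascent, then score its influence as the change in test
# loss. All names and hyperparameters are hypothetical assumptions, not the
# authors' implementation.
import copy
import itertools

import torch


def mean_test_loss(model, test_loader, device="cpu"):
    """Average language-modeling loss of `model` over the test set."""
    model.eval()
    total, batches = 0.0, 0
    with torch.no_grad():
        for batch in test_loader:
            ids = batch["input_ids"].to(device)
            mask = batch["attention_mask"].to(device)
            total += model(input_ids=ids, attention_mask=mask, labels=ids).loss.item()
            batches += 1
    return total / max(batches, 1)


def untrac_influence(model, train_loader, test_loader, steps=100, lr=1e-5, device="cpu"):
    """Unlearn one training dataset via gradient ascent and return the
    resulting increase in test loss as its estimated influence."""
    baseline = mean_test_loss(model, test_loader, device)
    unlearned = copy.deepcopy(model).to(device)
    optimizer = torch.optim.SGD(unlearned.parameters(), lr=lr)
    unlearned.train()
    for batch in itertools.islice(itertools.cycle(train_loader), steps):
        ids = batch["input_ids"].to(device)
        mask = batch["attention_mask"].to(device)
        loss = unlearned(input_ids=ids, attention_mask=mask, labels=ids).loss
        optimizer.zero_grad()
        (-loss).backward()  # gradient *ascent*: maximize loss on this dataset
        optimizer.step()
    return mean_test_loss(unlearned, test_loader, device) - baseline
```

Per the abstract, UnTrac-Inv swaps the roles: it unlearns the test dataset once and evaluates the unlearned model on each training dataset, so a single unlearning run suffices regardless of how many training datasets are scored.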