@inproceedings{qiao-etal-2024-comem,
title = "{COMEM}: In-Context Retrieval-Augmented Mass-Editing Memory in Large Language Models",
author = "Qiao, Shanbao and
Liu, Xuebing and
Na, Seung-Hoon",
editor = "Duh, Kevin and
Gomez, Helena and
Bethard, Steven",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2024",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.findings-naacl.151",
doi = "10.18653/v1/2024.findings-naacl.151",
pages = "2333--2347",
abstract = "Noting that world knowledge continuously evolves over time, large language models (LLMs) need to be properly adjusted by performing the {``}knowledge editing{''}, which involves updating outdated information or correcting false information. To achieve reliable and {``}massive{''} editing capabilities in terms of $\textit{generalization}$ and $\textit{specificity}$, this paper proposes a unified knowledge editing method called in-$\textbf{CO}$ntext retrieval-augmented $\textbf{M}$ass-$\textbf{E}$diting $\textbf{M}$emory (COMEM), which combines two types of editing approaches: parameter updating and in-context knowledge editing (IKE). In particular, COMEM incorporates $\textit{retrieval-augmented IKE}$, a novel extension of IKE designed for massive editing tasks, based on an $\textit{updating}$-aware demonstration construction.Experimental results on the zsRE and CounterFact datasets demonstrate that COMEM outperforms all existing methods, achieving state-of-the-art performance. Our code is available at https://github.com/JoveReCode/COMEM.git.",
}
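The abstract describes two ingredients: a parameter-updating editor and retrieval-augmented in-context knowledge editing (IKE), in which the demonstrations prepended to a query are retrieved from the stored edits. As a rough illustration only — the sketch below is not the authors' implementation (their code is at the linked repository); the `EditMemory` class, the bag-of-words similarity, and the prompt template are all illustrative assumptions — here is the retrieve-and-prepend mechanic in minimal Python:

```python
# Toy sketch of retrieval-augmented in-context knowledge editing (IKE):
# keep a memory of edited facts, retrieve the edits most relevant to a
# query, and prepend them to the prompt as demonstrations. All names and
# the similarity function are illustrative, not COMEM's actual code.
from collections import Counter
import math

def bow_cosine(a: str, b: str) -> float:
    """Cosine similarity over bag-of-words counts; a crude stand-in
    for the learned retriever a real system would use."""
    va, vb = Counter(a.lower().split()), Counter(b.lower().split())
    dot = sum(va[w] * vb[w] for w in va)
    norm = (math.sqrt(sum(c * c for c in va.values()))
            * math.sqrt(sum(c * c for c in vb.values())))
    return dot / norm if norm else 0.0

class EditMemory:
    """Holds (prompt, new_answer) edit pairs and returns the k edits
    most similar to a query, to serve as in-context demonstrations."""
    def __init__(self) -> None:
        self.edits: list[tuple[str, str]] = []

    def add(self, prompt: str, new_answer: str) -> None:
        self.edits.append((prompt, new_answer))

    def retrieve(self, query: str, k: int = 2) -> list[tuple[str, str]]:
        ranked = sorted(self.edits,
                        key=lambda e: bow_cosine(query, e[0]),
                        reverse=True)
        return ranked[:k]

def build_prompt(memory: EditMemory, query: str) -> str:
    """Prepend the retrieved edits as demonstrations; the resulting
    string would then be fed to the (possibly parameter-edited) LLM."""
    demos = [f"New fact: {p} {a}." for p, a in memory.retrieve(query)]
    return "\n".join(demos + [f"Question: {query}\nAnswer:"])

# Toy usage with two counterfactual edits.
memory = EditMemory()
memory.add("The capital of France is", "Lyon")
memory.add("The author of Hamlet is", "Mark Twain")
print(build_prompt(memory, "What is the capital of France?"))
```

The sketch covers only the retrieval-and-prepend step; in the paper this is combined with mass parameter updates and an "updating-aware" construction of the demonstrations themselves.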
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="qiao-etal-2024-comem">
<titleInfo>
<title>COMEM: In-Context Retrieval-Augmented Mass-Editing Memory in Large Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Shanbao</namePart>
<namePart type="family">Qiao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xuebing</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Seung-Hoon</namePart>
<namePart type="family">Na</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: NAACL 2024</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kevin</namePart>
<namePart type="family">Duh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Helena</namePart>
<namePart type="family">Gomez</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Steven</namePart>
<namePart type="family">Bethard</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Mexico City, Mexico</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Noting that world knowledge continuously evolves over time, large language models (LLMs) need to be properly adjusted by performing the “knowledge editing”, which involves updating outdated information or correcting false information. To achieve reliable and “massive” editing capabilities in terms of generalization and specificity, this paper proposes a unified knowledge editing method called in-COntext retrieval-augmented Mass-Editing Memory (COMEM), which combines two types of editing approaches: parameter updating and in-context knowledge editing (IKE). In particular, COMEM incorporates retrieval-augmented IKE, a novel extension of IKE designed for massive editing tasks, based on an updating-aware demonstration construction.Experimental results on the zsRE and CounterFact datasets demonstrate that COMEM outperforms all existing methods, achieving state-of-the-art performance. Our code is available at https://github.com/JoveReCode/COMEM.git.</abstract>
<identifier type="citekey">qiao-etal-2024-comem</identifier>
<identifier type="doi">10.18653/v1/2024.findings-naacl.151</identifier>
<location>
<url>https://aclanthology.org/2024.findings-naacl.151</url>
</location>
<part>
<date>2024-06</date>
<extent unit="page">
<start>2333</start>
<end>2347</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T COMEM: In-Context Retrieval-Augmented Mass-Editing Memory in Large Language Models
%A Qiao, Shanbao
%A Liu, Xuebing
%A Na, Seung-Hoon
%Y Duh, Kevin
%Y Gomez, Helena
%Y Bethard, Steven
%S Findings of the Association for Computational Linguistics: NAACL 2024
%D 2024
%8 June
%I Association for Computational Linguistics
%C Mexico City, Mexico
%F qiao-etal-2024-comem
%X Noting that world knowledge continuously evolves over time, large language models (LLMs) need to be properly adjusted by performing the “knowledge editing”, which involves updating outdated information or correcting false information. To achieve reliable and “massive” editing capabilities in terms of generalization and specificity, this paper proposes a unified knowledge editing method called in-COntext retrieval-augmented Mass-Editing Memory (COMEM), which combines two types of editing approaches: parameter updating and in-context knowledge editing (IKE). In particular, COMEM incorporates retrieval-augmented IKE, a novel extension of IKE designed for massive editing tasks, based on an updating-aware demonstration construction. Experimental results on the zsRE and CounterFact datasets demonstrate that COMEM outperforms all existing methods, achieving state-of-the-art performance. Our code is available at https://github.com/JoveReCode/COMEM.git.
%R 10.18653/v1/2024.findings-naacl.151
%U https://aclanthology.org/2024.findings-naacl.151
%U https://doi.org/10.18653/v1/2024.findings-naacl.151
%P 2333-2347
Markdown (Informal)
[COMEM: In-Context Retrieval-Augmented Mass-Editing Memory in Large Language Models](https://aclanthology.org/2024.findings-naacl.151) (Qiao et al., Findings 2024)
ACL
Shanbao Qiao, Xuebing Liu, and Seung-Hoon Na. 2024. [COMEM: In-Context Retrieval-Augmented Mass-Editing Memory in Large Language Models](https://aclanthology.org/2024.findings-naacl.151). In *Findings of the Association for Computational Linguistics: NAACL 2024*, pages 2333–2347, Mexico City, Mexico. Association for Computational Linguistics.