@inproceedings{gutierrez-megias-etal-2024-smart,
title = "Smart Lexical Search for Label Flipping Adversial Attack",
author = "Guti{\'e}rrez-Meg{\'\i}as, Alberto and
Jim{\'e}nez-Zafra, Salud Mar{\'\i}a and
Ure{\~n}a, L. Alfonso and
Mart{\'\i}nez-C{\'a}mara, Eugenio",
editor = "Habernal, Ivan and
Ghanavati, Sepideh and
Ravichander, Abhilasha and
Jain, Vijayanta and
Thaine, Patricia and
Igamberdiev, Timour and
Mireshghallah, Niloofar and
Feyisetan, Oluwaseyi",
booktitle = "Proceedings of the Fifth Workshop on Privacy in Natural Language Processing",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.privatenlp-1.11",
pages = "97--106",
abstract = "Language models are susceptible to vulnerability through adversarial attacks, using manipulations of the input data to disrupt their performance. Accordingly, it represents a cibersecurity leak. Data manipulations are intended to be unidentifiable by the learning model and by humans, small changes can disturb the final label of a classification task. Hence, we propose a novel attack built upon explainability methods to identify the salient lexical units to alter in order to flip the classification label. We asses our proposal on a disinformation dataset, and we show that our attack reaches high balance among stealthiness and efficiency.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="gutierrez-megias-etal-2024-smart">
<titleInfo>
<title>Smart Lexical Search for Label Flipping Adversial Attack</title>
</titleInfo>
<name type="personal">
<namePart type="given">Alberto</namePart>
<namePart type="family">Gutiérrez-Megías</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Salud</namePart>
<namePart type="given">María</namePart>
<namePart type="family">Jiménez-Zafra</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">L</namePart>
<namePart type="given">Alfonso</namePart>
<namePart type="family">Ureña</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Eugenio</namePart>
<namePart type="family">Martínez-Cámara</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Fifth Workshop on Privacy in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ivan</namePart>
<namePart type="family">Habernal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sepideh</namePart>
<namePart type="family">Ghanavati</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Abhilasha</namePart>
<namePart type="family">Ravichander</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vijayanta</namePart>
<namePart type="family">Jain</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Patricia</namePart>
<namePart type="family">Thaine</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Timour</namePart>
<namePart type="family">Igamberdiev</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Niloofar</namePart>
<namePart type="family">Mireshghallah</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Oluwaseyi</namePart>
<namePart type="family">Feyisetan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Bangkok, Thailand</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Language models are vulnerable to adversarial attacks, which manipulate the input data to disrupt their performance; accordingly, they represent a cybersecurity threat. These data manipulations are intended to be unidentifiable by the learning model and by humans, yet small changes can change the final label of a classification task. Hence, we propose a novel attack built upon explainability methods to identify the salient lexical units to alter in order to flip the classification label. We assess our proposal on a disinformation dataset, and we show that our attack achieves a strong balance between stealthiness and efficiency.</abstract>
<identifier type="citekey">gutierrez-megias-etal-2024-smart</identifier>
<location>
<url>https://aclanthology.org/2024.privatenlp-1.11</url>
</location>
<part>
<date>2024-08</date>
<extent unit="page">
<start>97</start>
<end>106</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Smart Lexical Search for Label Flipping Adversial Attack
%A Gutiérrez-Megías, Alberto
%A Jiménez-Zafra, Salud María
%A Ureña, L. Alfonso
%A Martínez-Cámara, Eugenio
%Y Habernal, Ivan
%Y Ghanavati, Sepideh
%Y Ravichander, Abhilasha
%Y Jain, Vijayanta
%Y Thaine, Patricia
%Y Igamberdiev, Timour
%Y Mireshghallah, Niloofar
%Y Feyisetan, Oluwaseyi
%S Proceedings of the Fifth Workshop on Privacy in Natural Language Processing
%D 2024
%8 August
%I Association for Computational Linguistics
%C Bangkok, Thailand
%F gutierrez-megias-etal-2024-smart
%X Language models are vulnerable to adversarial attacks, which manipulate the input data to disrupt their performance; accordingly, they represent a cybersecurity threat. These data manipulations are intended to be unidentifiable by the learning model and by humans, yet small changes can change the final label of a classification task. Hence, we propose a novel attack built upon explainability methods to identify the salient lexical units to alter in order to flip the classification label. We assess our proposal on a disinformation dataset, and we show that our attack achieves a strong balance between stealthiness and efficiency.
%U https://aclanthology.org/2024.privatenlp-1.11
%P 97-106
Markdown (Informal)
[Smart Lexical Search for Label Flipping Adversial Attack](https://aclanthology.org/2024.privatenlp-1.11) (Gutiérrez-Megías et al., PrivateNLP-WS 2024)
ACL
Alberto Gutiérrez-Megías, Salud María Jiménez-Zafra, L. Alfonso Ureña, and Eugenio Martínez-Cámara. 2024. Smart Lexical Search for Label Flipping Adversial Attack. In Proceedings of the Fifth Workshop on Privacy in Natural Language Processing, pages 97–106, Bangkok, Thailand. Association for Computational Linguistics.
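
The abstract describes the attack only at a high level: an explainability method ranks the lexical units whose alteration is most likely to flip the classifier's label. As a purely illustrative sketch (not the authors' implementation), the snippet below uses occlusion-based token saliency over a toy scikit-learn text classifier to guide a single-word substitution that flips the predicted label; the training corpus, the substitution list, and the greedy search are all hypothetical stand-ins for whatever the paper actually uses.

```python
# Illustrative sketch only: occlusion-saliency-guided label-flipping attack.
# The corpus, labels, and substitution candidates below are hypothetical.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

# Toy stand-in for a disinformation dataset.
texts = [
    "the vaccine is a hoax spread by elites",
    "officials confirmed the vaccine passed clinical trials",
    "secret cure hidden from the public",
    "the study was peer reviewed and replicated",
]
labels = [1, 0, 1, 0]  # 1 = disinformation, 0 = reliable

clf = make_pipeline(TfidfVectorizer(), LogisticRegression())
clf.fit(texts, labels)

def token_saliency(text, label):
    """Occlusion saliency: drop in P(label) when each token is removed."""
    tokens = text.split()
    base = clf.predict_proba([text])[0][label]
    scores = []
    for i in range(len(tokens)):
        occluded = " ".join(tokens[:i] + tokens[i + 1:])
        scores.append(base - clf.predict_proba([occluded])[0][label])
    return tokens, scores

def flip_label(text, substitutes=("thing", "report", "statement")):
    """Greedily edit the most salient token until the predicted label flips."""
    label = int(clf.predict([text])[0])
    tokens, scores = token_saliency(text, label)
    order = sorted(range(len(tokens)), key=lambda i: scores[i], reverse=True)
    for i in order:
        for sub in substitutes:
            candidate = " ".join(tokens[:i] + [sub] + tokens[i + 1:])
            if int(clf.predict([candidate])[0]) != label:
                return candidate  # minimal edit that flips the prediction
    return None  # no flip found within this substitution budget

print(flip_label("secret cure hidden from the elites"))
```

The sketch trades off the same two quantities the abstract highlights: stealthiness (only one token is edited, chosen where the model is most sensitive) and efficiency (candidates are tried in saliency order rather than exhaustively).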