@inproceedings{yoo-etal-2022-detection,
    title = "Detection of Adversarial Examples in Text Classification: Benchmark and Baseline via Robust Density Estimation",
    author = "Yoo, KiYoon and
      Kim, Jangho and
      Jang, Jiho and
      Kwak, Nojun",
    editor = "Muresan, Smaranda and
      Nakov, Preslav and
      Villavicencio, Aline",
    booktitle = "Findings of the Association for Computational Linguistics: ACL 2022",
    month = may,
    year = "2022",
    address = "Dublin, Ireland",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.findings-acl.289",
    doi = "10.18653/v1/2022.findings-acl.289",
    pages = "3656--3672",
abstract = "Word-level adversarial attacks have shown success in NLP models, drastically decreasing the performance of transformer-based models in recent years. As a countermeasure, adversarial defense has been explored, but relatively few efforts have been made to detect adversarial examples. However, detecting adversarial examples may be crucial for automated tasks (e.g. review sentiment analysis) that wish to amass information about a certain population and additionally be a step towards a robust defense system. To this end, we release a dataset for four popular attack methods on four datasets and four models to encourage further research in this field. Along with it, we propose a competitive baseline based on density estimation that has the highest auc on 29 out of 30 dataset-attack-model combinations. The source code is released (\url{https://github.com/bangawayoo/adversarial-examples-in-text-classification}).",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="yoo-etal-2022-detection">
    <titleInfo>
      <title>Detection of Adversarial Examples in Text Classification: Benchmark and Baseline via Robust Density Estimation</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">KiYoon</namePart>
      <namePart type="family">Yoo</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jangho</namePart>
      <namePart type="family">Kim</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jiho</namePart>
      <namePart type="family">Jang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Nojun</namePart>
      <namePart type="family">Kwak</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2022-05</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: ACL 2022</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Smaranda</namePart>
        <namePart type="family">Muresan</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Preslav</namePart>
        <namePart type="family">Nakov</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Aline</namePart>
        <namePart type="family">Villavicencio</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Dublin, Ireland</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Word-level adversarial attacks have shown success in NLP models, drastically decreasing the performance of transformer-based models in recent years. As a countermeasure, adversarial defense has been explored, but relatively few efforts have been made to detect adversarial examples. However, detecting adversarial examples may be crucial for automated tasks (e.g., review sentiment analysis) that wish to amass information about a certain population and additionally be a step towards a robust defense system. To this end, we release a dataset for four popular attack methods on four datasets and four models to encourage further research in this field. Along with it, we propose a competitive baseline based on density estimation that has the highest AUC on 29 out of 30 dataset-attack-model combinations. The source code is released (https://github.com/bangawayoo/adversarial-examples-in-text-classification).</abstract>
<identifier type="citekey">yoo-etal-2022-detection</identifier>
<identifier type="doi">10.18653/v1/2022.findings-acl.289</identifier>
<location>
<url>https://aclanthology.org/2022.findings-acl.289</url>
</location>
<part>
<date>2022-05</date>
<extent unit="page">
<start>3656</start>
<end>3672</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Detection of Adversarial Examples in Text Classification: Benchmark and Baseline via Robust Density Estimation
%A Yoo, KiYoon
%A Kim, Jangho
%A Jang, Jiho
%A Kwak, Nojun
%Y Muresan, Smaranda
%Y Nakov, Preslav
%Y Villavicencio, Aline
%S Findings of the Association for Computational Linguistics: ACL 2022
%D 2022
%8 May
%I Association for Computational Linguistics
%C Dublin, Ireland
%F yoo-etal-2022-detection
%X Word-level adversarial attacks have shown success in NLP models, drastically decreasing the performance of transformer-based models in recent years. As a countermeasure, adversarial defense has been explored, but relatively few efforts have been made to detect adversarial examples. However, detecting adversarial examples may be crucial for automated tasks (e.g., review sentiment analysis) that wish to amass information about a certain population and additionally be a step towards a robust defense system. To this end, we release a dataset for four popular attack methods on four datasets and four models to encourage further research in this field. Along with it, we propose a competitive baseline based on density estimation that has the highest AUC on 29 out of 30 dataset-attack-model combinations. The source code is released (https://github.com/bangawayoo/adversarial-examples-in-text-classification).
%R 10.18653/v1/2022.findings-acl.289
%U https://aclanthology.org/2022.findings-acl.289
%U https://doi.org/10.18653/v1/2022.findings-acl.289
%P 3656-3672
Markdown (Informal)

[Detection of Adversarial Examples in Text Classification: Benchmark and Baseline via Robust Density Estimation](https://aclanthology.org/2022.findings-acl.289) (Yoo et al., Findings 2022)

ACL

KiYoon Yoo, Jangho Kim, Jiho Jang, and Nojun Kwak. 2022. Detection of Adversarial Examples in Text Classification: Benchmark and Baseline via Robust Density Estimation. In Findings of the Association for Computational Linguistics: ACL 2022, pages 3656–3672, Dublin, Ireland. Association for Computational Linguistics.
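
The abstract's core idea, fitting a density model to features of clean examples and flagging low-likelihood inputs as adversarial, can be illustrated with a minimal sketch. This is a generic illustration under synthetic data, not the authors' actual method: the `encode` function is a hypothetical stand-in for a sentence encoder (e.g. a transformer's [CLS] embedding), and the single-Gaussian density and shift parameter are assumptions made purely for demonstration.

```python
# Minimal sketch of density-estimation-based adversarial detection
# (illustrative only; NOT the paper's exact method).
import numpy as np
from sklearn.covariance import EmpiricalCovariance
from sklearn.metrics import roc_auc_score

rng = np.random.default_rng(0)

def encode(n, dim=32, shift=0.0):
    # Hypothetical encoder output: clean features cluster near the origin,
    # "adversarial" features are shifted away from it.
    return rng.normal(loc=shift, scale=1.0, size=(n, dim))

clean_train = encode(500)            # features of clean training texts
clean_test = encode(200)             # clean held-out features
adv_test = encode(200, shift=1.5)    # synthetic "adversarial" features

# Fit a Gaussian density to the clean features; the squared Mahalanobis
# distance under that fit serves as a low-density (outlier) score.
density = EmpiricalCovariance().fit(clean_train)
dist = density.mahalanobis(np.vstack([clean_test, adv_test]))

# Treat adversarial examples as the positive class and report detection AUC,
# the metric the abstract uses to compare detectors.
labels = np.array([0] * len(clean_test) + [1] * len(adv_test))
print("detection AUC:", roc_auc_score(labels, dist))
```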