@inproceedings{chang-etal-2021-robustness,
title = "Robustness and Adversarial Examples in Natural Language Processing",
author = "Chang, Kai-Wei and
He, He and
Jia, Robin and
Singh, Sameer",
editor = "Jiang, Jing and
Vuli{\'c}, Ivan",
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: Tutorial Abstracts",
month = nov,
year = "2021",
address = "Punta Cana, Dominican Republic {\&} Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.emnlp-tutorials.5",
doi = "10.18653/v1/2021.emnlp-tutorials.5",
pages = "22--26",
abstract = "Recent studies show that many NLP systems are sensitive and vulnerable to a small perturbation of inputs and do not generalize well across different datasets. This lack of robustness derails the use of NLP systems in real-world applications. This tutorial aims at bringing awareness of practical concerns about NLP robustness. It targets NLP researchers and practitioners who are interested in building reliable NLP systems. In particular, we will review recent studies on analyzing the weakness of NLP systems when facing adversarial inputs and data with a distribution shift. We will provide the audience with a holistic view of 1) how to use adversarial examples to examine the weakness of NLP models and facilitate debugging; 2) how to enhance the robustness of existing NLP models and defense against adversarial inputs; and 3) how the consideration of robustness affects the real-world NLP applications used in our daily lives. We will conclude the tutorial by outlining future research directions in this area.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="chang-etal-2021-robustness">
    <titleInfo>
      <title>Robustness and Adversarial Examples in Natural Language Processing</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Kai-Wei</namePart>
      <namePart type="family">Chang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">He</namePart>
      <namePart type="family">He</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Robin</namePart>
      <namePart type="family">Jia</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Sameer</namePart>
      <namePart type="family">Singh</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: Tutorial Abstracts</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Jing</namePart>
        <namePart type="family">Jiang</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Ivan</namePart>
        <namePart type="family">Vulić</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Punta Cana, Dominican Republic &amp; Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Recent studies show that many NLP systems are sensitive and vulnerable to small perturbations of inputs and do not generalize well across different datasets. This lack of robustness derails the use of NLP systems in real-world applications. This tutorial aims to raise awareness of practical concerns about NLP robustness. It targets NLP researchers and practitioners who are interested in building reliable NLP systems. In particular, we will review recent studies on analyzing the weaknesses of NLP systems when facing adversarial inputs and data with distribution shift. We will provide the audience with a holistic view of 1) how to use adversarial examples to examine the weaknesses of NLP models and facilitate debugging; 2) how to enhance the robustness of existing NLP models and defend against adversarial inputs; and 3) how considerations of robustness affect the real-world NLP applications used in our daily lives. We will conclude the tutorial by outlining future research directions in this area.</abstract>
<identifier type="citekey">chang-etal-2021-robustness</identifier>
<identifier type="doi">10.18653/v1/2021.emnlp-tutorials.5</identifier>
<location>
<url>https://aclanthology.org/2021.emnlp-tutorials.5</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>22</start>
<end>26</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Robustness and Adversarial Examples in Natural Language Processing
%A Chang, Kai-Wei
%A He, He
%A Jia, Robin
%A Singh, Sameer
%Y Jiang, Jing
%Y Vulić, Ivan
%S Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: Tutorial Abstracts
%D 2021
%8 November
%I Association for Computational Linguistics
%C Punta Cana, Dominican Republic & Online
%F chang-etal-2021-robustness
%X Recent studies show that many NLP systems are sensitive and vulnerable to small perturbations of inputs and do not generalize well across different datasets. This lack of robustness derails the use of NLP systems in real-world applications. This tutorial aims to raise awareness of practical concerns about NLP robustness. It targets NLP researchers and practitioners who are interested in building reliable NLP systems. In particular, we will review recent studies on analyzing the weaknesses of NLP systems when facing adversarial inputs and data with distribution shift. We will provide the audience with a holistic view of 1) how to use adversarial examples to examine the weaknesses of NLP models and facilitate debugging; 2) how to enhance the robustness of existing NLP models and defend against adversarial inputs; and 3) how considerations of robustness affect the real-world NLP applications used in our daily lives. We will conclude the tutorial by outlining future research directions in this area.
%R 10.18653/v1/2021.emnlp-tutorials.5
%U https://aclanthology.org/2021.emnlp-tutorials.5
%U https://doi.org/10.18653/v1/2021.emnlp-tutorials.5
%P 22-26
Markdown (Informal)
[Robustness and Adversarial Examples in Natural Language Processing](https://aclanthology.org/2021.emnlp-tutorials.5) (Chang et al., EMNLP 2021)
ACL
Kai-Wei Chang, He He, Robin Jia, and Sameer Singh. 2021. Robustness and Adversarial Examples in Natural Language Processing. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: Tutorial Abstracts, pages 22–26, Punta Cana, Dominican Republic & Online. Association for Computational Linguistics.