@inproceedings{zhang-etal-2021-certified,
    title     = {Certified Robustness to Programmable Transformations in {LSTM}s},
    author    = {Zhang, Yuhao and
                 Albarghouthi, Aws and
                 D{'}Antoni, Loris},
    booktitle = {Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing},
    month     = nov,
    year      = {2021},
    address   = {Online and Punta Cana, Dominican Republic},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2021.emnlp-main.82},
    doi       = {10.18653/v1/2021.emnlp-main.82},
    pages     = {1068--1083},
    abstract  = {Deep neural networks for natural language processing are fragile in the face of adversarial examples{---}small input perturbations, like synonym substitution or word duplication, which cause a neural network to change its prediction. We present an approach to certifying the robustness of LSTMs (and extensions of LSTMs) and training models that can be efficiently certified. Our approach can certify robustness to intractably large perturbation spaces defined programmatically in a language of string transformations. Our evaluation shows that (1) our approach can train models that are more robust to combinations of string transformations than those produced using existing techniques; (2) our approach can show high certification accuracy of the resulting models.},
}
<?xml version="1.0" encoding="UTF-8"?>
<!-- MODS v3 metadata record mirroring the BibTeX entry above
     (citekey: zhang-etal-2021-certified, EMNLP 2021).
     Generated export; element order and whitespace left as-is. -->
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhang-etal-2021-certified">
<titleInfo>
<title>Certified Robustness to Programmable Transformations in LSTMs</title>
</titleInfo>
<!-- One <name> element per author, in publication order. -->
<name type="personal">
<namePart type="given">Yuhao</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aws</namePart>
<namePart type="family">Albarghouthi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Loris</namePart>
<namePart type="family">D’Antoni</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<!-- Host item: the proceedings volume containing this paper. -->
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online and Punta Cana, Dominican Republic</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Deep neural networks for natural language processing are fragile in the face of adversarial examples—small input perturbations, like synonym substitution or word duplication, which cause a neural network to change its prediction. We present an approach to certifying the robustness of LSTMs (and extensions of LSTMs) and training models that can be efficiently certified. Our approach can certify robustness to intractably large perturbation spaces defined programmatically in a language of string transformations. Our evaluation shows that (1) our approach can train models that are more robust to combinations of string transformations than those produced using existing techniques; (2) our approach can show high certification accuracy of the resulting models.</abstract>
<identifier type="citekey">zhang-etal-2021-certified</identifier>
<identifier type="doi">10.18653/v1/2021.emnlp-main.82</identifier>
<location>
<url>https://aclanthology.org/2021.emnlp-main.82</url>
</location>
<!-- Page extent within the host proceedings. -->
<part>
<date>2021-11</date>
<extent unit="page">
<start>1068</start>
<end>1083</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Certified Robustness to Programmable Transformations in LSTMs
%A Zhang, Yuhao
%A Albarghouthi, Aws
%A D’Antoni, Loris
%S Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing
%D 2021
%8 November
%I Association for Computational Linguistics
%C Online and Punta Cana, Dominican Republic
%F zhang-etal-2021-certified
%X Deep neural networks for natural language processing are fragile in the face of adversarial examples—small input perturbations, like synonym substitution or word duplication, which cause a neural network to change its prediction. We present an approach to certifying the robustness of LSTMs (and extensions of LSTMs) and training models that can be efficiently certified. Our approach can certify robustness to intractably large perturbation spaces defined programmatically in a language of string transformations. Our evaluation shows that (1) our approach can train models that are more robust to combinations of string transformations than those produced using existing techniques; (2) our approach can show high certification accuracy of the resulting models.
%R 10.18653/v1/2021.emnlp-main.82
%U https://aclanthology.org/2021.emnlp-main.82
%U https://doi.org/10.18653/v1/2021.emnlp-main.82
%P 1068-1083
Markdown (Informal)
[Certified Robustness to Programmable Transformations in LSTMs](https://aclanthology.org/2021.emnlp-main.82) (Zhang et al., EMNLP 2021)
ACL
- Yuhao Zhang, Aws Albarghouthi, and Loris D’Antoni. 2021. Certified Robustness to Programmable Transformations in LSTMs. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 1068–1083, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.