@inproceedings{ebrahimi-EtAl:2018:Short,
  author    = {Ebrahimi, Javid and Rao, Anyi and Lowd, Daniel and Dou, Dejing},
  title     = {{HotFlip}: White-Box Adversarial Examples for Text Classification},
  booktitle = {Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
  month     = jul,
  year      = {2018},
  address   = {Melbourne, Australia},
  publisher = {Association for Computational Linguistics},
  pages     = {31--36},
  doi       = {10.18653/v1/P18-2006},
  url       = {http://www.aclweb.org/anthology/P18-2006},
  abstract  = {We propose an efficient method to generate white-box adversarial examples to trick a character-level neural classifier. We find that only a few manipulations are needed to greatly decrease the accuracy. Our method relies on an atomic flip operation, which swaps one token for another, based on the gradients of the one-hot input vectors. Due to efficiency of our method, we can perform adversarial training which makes the model more robust to attacks at test time. With the use of a few semantics-preserving constraints, we demonstrate that HotFlip can be adapted to attack a word-level classifier as well.},
}

