@inproceedings{kang-EtAl:2018:Long,
  author    = {Kang, Dongyeop  and  Khot, Tushar  and  Sabharwal, Ashish  and  Hovy, Eduard},
  title     = {{AdvEntuRe}: Adversarial Training for Textual Entailment with Knowledge-Guided Examples},
  booktitle = {Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2018},
  address   = {Melbourne, Australia},
  publisher = {Association for Computational Linguistics},
  pages     = {2418--2428},
  abstract  = {We consider the problem of learning textual entailment models with limited supervision (5K-10K training examples), and present two complementary approaches for it. First, we propose knowledge-guided adversarial example generators for incorporating large lexical resources in entailment models via only a handful of rule templates. Second, to make the entailment model---a discriminator---more robust, we propose the first GAN-style approach for training it using a natural language example generator that iteratively adjusts to the discriminator's weaknesses. We demonstrate effectiveness using two entailment datasets, where the proposed methods increase accuracy by 4.7\% on SciTail and by 2.8\% on a 1\% sub-sample of SNLI. Notably, even a single hand-written rule, negate, improves the accuracy of negation examples in SNLI by 6.1\%.},
  doi       = {10.18653/v1/P18-1225},
  url       = {http://www.aclweb.org/anthology/P18-1225}
}

