@InProceedings{wilcox-EtAl:2018:BlackboxNLP,
  author    = {Wilcox, Ethan and Levy, Roger and Morita, Takashi and Futrell, Richard},
  title     = {What do {RNN} Language Models Learn about Filler--Gap Dependencies?},
  booktitle = {Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {211--221},
  abstract  = {{RNN} language models have achieved state-of-the-art perplexity results and have proven useful in a suite of NLP tasks, but it is as yet unclear what syntactic generalizations they learn. Here we investigate whether state-of-the-art RNN language models represent long-distance filler--gap dependencies and constraints on them. Examining RNN behavior on experimentally controlled sentences designed to expose filler--gap dependencies, we show that RNNs can represent the relationship in multiple syntactic positions and over large spans of text. Furthermore, we show that RNNs learn a subset of the known restrictions on filler--gap dependencies, known as island constraints: RNNs show evidence for wh-islands, adjunct islands, and complex NP islands. These studies demonstrate that state-of-the-art RNN models are able to learn and generalize about empty syntactic positions.},
  url       = {http://www.aclweb.org/anthology/W18-5423}
}

