@inproceedings{sennhauser-berwick:2018:BlackboxNLP,
  author    = {Sennhauser, Luzi and Berwick, Robert},
  title     = {Evaluating the Ability of {LSTMs} to Learn Context-Free Grammars},
  booktitle = {Proceedings of the 2018 {EMNLP} Workshop {BlackboxNLP}: Analyzing and Interpreting Neural Networks for {NLP}},
  month     = nov,
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {115--124},
  abstract  = {While long short-term memory (LSTM) neural net architectures are designed to capture sequence information, human language is generally composed of hierarchical structures. This raises the question as to whether LSTMs can learn hierarchical structures. We explore this question with a well-formed bracket prediction task using two types of brackets modeled by an LSTM.},
  url       = {http://www.aclweb.org/anthology/W18-5414},
}

