@inproceedings{paperno:2018:BlackboxNLP,
  author    = {Paperno, Denis},
  title     = {Limitations in Learning an Interpreted Language with Recurrent Models},
  booktitle = {Proceedings of the 2018 {EMNLP} Workshop {BlackboxNLP}: Analyzing and Interpreting Neural Networks for {NLP}},
  month     = nov,
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {384--386},
  abstract  = {In this submission I report work in progress on learning simplified interpreted languages by means of recurrent models. The data is constructed to reflect core properties of natural language as modeled in formal syntax and semantics. Preliminary results suggest that LSTM networks do generalise to compositional interpretation, albeit only in the most favorable learning setting.},
  url       = {http://www.aclweb.org/anthology/W18-5456},
}

