@inproceedings{gelderloos-chrupala:2016:COLING,
  author    = {Gelderloos, Lieke  and  Chrupa{\l}a, Grzegorz},
  title     = {From phonemes to images: levels of representation in a recurrent neural model of visually-grounded language learning},
  booktitle = {Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers},
  month     = dec,
  year      = {2016},
  address   = {Osaka, Japan},
  publisher = {The COLING 2016 Organizing Committee},
  pages     = {1309--1319},
  abstract  = {We present a model of visually-grounded language learning based on stacked
	gated recurrent neural networks which learns to predict visual features given
	an image description in the form of a sequence of phonemes. The learning task
	resembles that faced by human language learners who need to discover both
	structure and meaning from noisy and ambiguous data across modalities. We show
	that our model indeed learns to predict features of the visual context given
	phonetically transcribed image descriptions, and show that it represents
	linguistic information in a hierarchy of levels: lower layers in the stack are
	comparatively more sensitive to form, whereas higher layers are more sensitive
	to meaning.},
  url       = {http://aclweb.org/anthology/C16-1124},
}

