@InProceedings{chrupala-gelderloos-alishahi:2017:Long,
  author    = {Chrupa{\l}a, Grzegorz  and  Gelderloos, Lieke  and  Alishahi, Afra},
  title     = {Representations of language in a model of visually grounded speech signal},
  booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {613--622},
  abstract  = {We present a visually grounded model of speech perception which projects spoken
	utterances and images to a joint semantic space. We use a multi-layer recurrent
	highway network to model the temporal nature of spoken speech, and show that it
	learns to extract both form and meaning-based linguistic knowledge from the
	input signal. We carry out an in-depth analysis of the representations used by
	different components of the trained model and show that encoding of semantic
	aspects tends to become richer as we go up the hierarchy of layers, whereas
	encoding of form-related aspects of the language input tends to initially
	increase and then plateau or decrease.},
  doi       = {10.18653/v1/P17-1057},
  url       = {https://aclanthology.org/P17-1057}
}

