@inproceedings{zarriess-schlangen:2017:Long,
  author    = {Zarrie{\ss}, Sina and Schlangen, David},
  title     = {Obtaining referential word meanings from visual and distributional information: Experiments on object naming},
  booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {243--254},
  doi       = {10.18653/v1/P17-1023},
  abstract  = {We investigate object naming, which is an important sub-task of referring
    expression generation on real-world images. As opposed to mutually exclusive
    labels used in object recognition, object names are more flexible, subject to
    communicative preferences and semantically related to each other. Therefore, we
    investigate models of referential word meaning that link visual to lexical
    information which we assume to be given through distributional word embeddings.
    We present a model that learns individual predictors for object names that link
    visual and distributional aspects of word meaning during training. We show that
    this is particularly beneficial for zero-shot learning, as compared to
    projecting visual objects directly into the distributional space. In a standard
    object naming task, we find that different ways of combining lexical and visual
    information achieve very similar performance, though experiments on model
    combination suggest that they capture complementary aspects of referential
    meaning.},
  url       = {https://aclanthology.org/P17-1023},
}

