@inproceedings{pustejovsky-EtAl:2016:GramLex,
  author    = {Pustejovsky, James and Do, Tuan and Kehat, Gitit and Krishnaswamy, Nikhil},
  title     = {The Development of Multimodal Lexical Resources},
  booktitle = {Proceedings of the Workshop on Grammar and Lexicon: interactions and interfaces ({GramLex})},
  month     = dec,
  year      = {2016},
  address   = {Osaka, Japan},
  publisher = {The COLING 2016 Organizing Committee},
  pages     = {41--47},
  abstract  = {Human communication is a multimodal activity, involving not only speech and written expressions, but intonation, images, gestures, visual clues, and the interpretation of actions through perception. In this paper, we describe the design of a multimodal lexicon that is able to accommodate the diverse modalities that present themselves in NLP applications. We have been developing a multimodal semantic representation, VoxML, that integrates the encoding of semantic, visual, gestural, and action-based features associated with linguistic expressions.},
  url       = {http://aclweb.org/anthology/W16-3807},
}

