@inproceedings{peng-chaturvedi-roth:2017:CoNLL,
  author    = {Peng, Haoruo and Chaturvedi, Snigdha and Roth, Dan},
  title     = {A Joint Model for Semantic Sequences: Frames, Entities, Sentiments},
  booktitle = {Proceedings of the 21st Conference on Computational Natural Language Learning ({CoNLL} 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {173--183},
  abstract  = {Understanding stories -- sequences of events -- is a crucial yet challenging
	natural language understanding task. These events typically carry multiple
	aspects of semantics including actions, entities and emotions. Not only does
	each individual aspect contribute to the meaning of the story, so does the
	interaction among these aspects.
	Building on this intuition, we propose to jointly model important aspects of
	semantic knowledge -- frames, entities and sentiments -- via a semantic
	language model. We achieve this by first representing these aspects' semantic
	units at an appropriate level of abstraction and then using the resulting
	vector representations for each semantic aspect to learn a joint representation
	via a neural language model.
	We show that the joint semantic language model is of high quality and can
	generate better semantic sequences than models that operate on the word level.
	We further demonstrate that our joint model can be applied to story cloze test
	and shallow discourse parsing tasks with improved performance and that each
	semantic aspect contributes to the model.},
  url       = {http://aclweb.org/anthology/K17-1019},
}

