@InProceedings{lu-ng:2017:Long,
  author    = {Lu, Jing  and  Ng, Vincent},
  title     = {Joint Learning for Event Coreference Resolution},
  booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {90--101},
  abstract  = {While joint models have been developed for many NLP tasks, the vast majority of
	event coreference resolvers, including the top-performing resolvers competing
	in the recent TAC KBP 2016 Event Nugget Detection and Coreference task, are
	pipeline-based, where the propagation of errors from the trigger detection
	component to the event coreference component is a major performance limiting
	factor. To address this problem, we propose a model for jointly learning event
	coreference, trigger detection, and event anaphoricity. Our joint model is
	novel in its choice of tasks and its features for capturing cross-task
	interactions. To our knowledge, this is the first attempt to train a
	mention-ranking model and employ event anaphoricity for event coreference. Our
	model achieves the best results to date on the KBP 2016 English and Chinese
	datasets.},
  url       = {http://aclweb.org/anthology/P17-1009},
}

