@InProceedings{stratos:2017:StructPred,
  author    = {Stratos, Karl},
  title     = {Entity Identification as Multitasking},
  booktitle = {Proceedings of the 2nd Workshop on Structured Prediction for Natural Language Processing},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {7--11},
  abstract  = {Standard approaches in entity identification hard-code boundary detection and
    type prediction into labels and perform Viterbi. This has two disadvantages: 1.
    the runtime complexity grows quadratically in the number of types, and 2. there
    is no natural segment-level representation. In this paper, we propose a neural
    architecture that addresses these disadvantages. We frame the problem as
    multitasking, separating boundary detection and type prediction but optimizing
    them jointly. Despite its simplicity, this architecture performs competitively
    with fully structured models such as BiLSTM-CRFs while scaling linearly in the
    number of types. Furthermore, by construction, the model induces
    type-disambiguating embeddings of predicted mentions.},
  url       = {http://www.aclweb.org/anthology/W17-4302}
}

