@inproceedings{gupta-singh-roth:2017:EMNLP2017,
  author    = {Gupta, Nitish and Singh, Sameer and Roth, Dan},
  title     = {Entity Linking via Joint Encoding of Types, Descriptions, and Context},
  booktitle = {Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {2681--2690},
  doi       = {10.18653/v1/D17-1284},
  url       = {https://www.aclweb.org/anthology/D17-1284},
  abstract  = {For accurate entity linking, we need to capture various information aspects of
    an entity, such as its description in a KB, contexts in which it is mentioned,
    and structured knowledge. Additionally, a linking system should work on texts
    from different domains without requiring domain-specific training data or
    hand-engineered features.
    In this work we present a neural, modular entity linking system that learns a
    unified dense representation for each entity using multiple sources of
    information, such as its description, contexts around its mentions, and its
    fine-grained types. We show that the resulting entity linking system is
    effective at combining these sources, and performs competitively, sometimes
    out-performing current state-of-the-art systems across datasets, without
    requiring any domain-specific training data or hand-engineered features. We
    also show that our model can effectively ``embed'' entities that are new to
    the KB, and is able to link its mentions accurately.},
}

