@inproceedings{pujara-augustine-getoor:2017:EMNLP2017,
  author    = {Pujara, Jay and Augustine, Eriq and Getoor, Lise},
  title     = {Sparsity and Noise: Where Knowledge Graph Embeddings Fall Short},
  booktitle = {Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {1751--1756},
  abstract  = {Knowledge graph (KG) embedding techniques use structured relationships between
	entities to learn low-dimensional representations of entities and relations.
	One prominent goal of these approaches is to improve the quality of knowledge
	graphs by removing errors and adding missing facts. Surprisingly, most
	embedding techniques have been evaluated on benchmark datasets consisting of
	dense and reliable subsets of human-curated KGs, which tend to be fairly
	complete and have few errors. In this paper, we consider the problem of
	applying embedding techniques to KGs extracted from text, which are often
	incomplete and contain errors. We compare the sparsity and unreliability of
	different KGs and perform empirical experiments demonstrating how embedding
	approaches degrade as sparsity and unreliability increase.},
  doi       = {10.18653/v1/D17-1184},
  url       = {https://www.aclweb.org/anthology/D17-1184},
}

