@inproceedings{xie-EtAl:2017:Long,
  author    = {Xie, Qizhe and Ma, Xuezhe and Dai, Zihang and Hovy, Eduard},
  title     = {An Interpretable Knowledge Transfer Model for Knowledge Base Completion},
  booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {950--962},
  doi       = {10.18653/v1/P17-1088},
  url       = {https://aclanthology.org/P17-1088},
  abstract  = {Knowledge bases are important resources for a variety of natural language
    processing tasks but suffer from incompleteness. We propose a novel embedding
    model, ITransF, to perform knowledge base completion. Equipped with a
    sparse attention mechanism, ITransF discovers hidden concepts of relations and
    transfer statistical strength through the sharing of concepts. Moreover, the
    learned associations between relations and concepts, which are represented by
    sparse attention vectors, can be interpreted easily.
    We evaluate ITransF on two benchmark datasets---WN18 and FB15k for knowledge
    base completion and obtains improvements on both the mean rank and Hits$@$10
    metrics, over all baselines that do not use additional information.},
}

