@InProceedings{shen-EtAl:2017:RepL4NLP1,
  author    = {Shen, Yelong  and  Huang, Po-Sen  and  Chang, Ming-Wei  and  Gao, Jianfeng},
  title     = {Modeling Large-Scale Structured Relationships with Shared Memory for Knowledge Base Completion},
  booktitle = {Proceedings of the 2nd Workshop on Representation Learning for NLP},
  month     = {August},
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {57--68},
  abstract  = {Recent studies on knowledge base completion, the task of recovering missing
	relationships based on recorded relations, demonstrate the importance of
	learning embeddings from multi-step relations. However, due to the size of
	knowledge bases, learning multi-step relations directly on top of observed
	triplets could be costly. Hence, a manually designed procedure is often used
	when training the models. In this paper, we propose Implicit ReasoNets (IRNs),
	which are designed to perform multi-step inference implicitly through a
	controller and shared memory. Without a human-designed inference procedure,
	IRNs use training data to learn to perform multi-step inference in a neural
	embedding space through the shared memory and controller. While the inference
	procedure does not explicitly operate on top of observed triplets, our proposed
	model outperforms all previous approaches on the popular FB15k benchmark by
	more than 5.7%.},
  url       = {http://www.aclweb.org/anthology/W17-2608}
}

