@InProceedings{kadlec-bajgar-kleindienst:2017:RepL4NLP,
  author    = {Kadlec, Rudolf and Bajgar, Ondrej and Kleindienst, Jan},
  title     = {Knowledge Base Completion: Baselines Strike Back},
  booktitle = {Proceedings of the 2nd Workshop on Representation Learning for NLP},
  month     = {August},
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {69--74},
  abstract  = {Many papers have been published on the knowledge base completion task in the
	past few years. Most of these introduce novel architectures for relation
	learning that are evaluated on standard datasets such as FB15k and WN18. This
	paper shows that almost all models published on FB15k can be outperformed by
	an appropriately tuned baseline: our reimplementation of the DistMult model.
	Our findings cast doubt on the claim that the performance improvements of
	recent models are due to architectural changes rather than hyper-parameter
	tuning or different training objectives.
	This should prompt future research to reconsider how model performance is
	evaluated and reported.},
  url       = {http://www.aclweb.org/anthology/W17-2609}
}
