@inproceedings{schneider-EtAl:2017:BLGNLP2017,
  author    = {Schneider, Rudolf and Oberhauser, Tom and Klatt, Tobias and Gers, Felix A. and L{\"o}ser, Alexander},
  title     = {Analysing Errors of {Open Information Extraction} Systems},
  booktitle = {Proceedings of the First Workshop on Building Linguistically Generalizable {NLP} Systems},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {11--18},
  abstract  = {We report results on benchmarking Open Information Extraction (OIE) systems
               using RelVis, a toolkit for benchmarking Open Information Extraction systems.
               Our comprehensive benchmark contains three data sets from the news domain and
               one data set from Wikipedia with overall 4522 labeled sentences and 11243
               binary or n-ary OIE relations.
               In our analysis on these data sets we compared the performance of four popular
               OIE systems, ClausIE, OpenIE 4.2, Stanford OpenIE and PredPatt.
               In addition, we evaluated the impact of five common error classes on a subset
               of 749 n-ary tuples.
               From our deep analysis we unreveal important research directions for a next
               generation on OIE systems.},
  url       = {http://www.aclweb.org/anthology/W17-5402},
}

