@inproceedings{schneider-etal-2017-analysing,
  title     = {Analysing Errors of Open Information Extraction Systems},
  author    = {Schneider, Rudolf and
               Oberhauser, Tom and
               Klatt, Tobias and
               Gers, Felix A. and
               L{\"o}ser, Alexander},
  editor    = {Bender, Emily and
               Daum{\'e} III, Hal and
               Ettinger, Allyson and
               Rao, Sudha},
  booktitle = {Proceedings of the First Workshop on Building Linguistically Generalizable {NLP} Systems},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/W17-5402},
  doi       = {10.18653/v1/W17-5402},
  pages     = {11--18},
  abstract  = {We report results on benchmarking Open Information Extraction (OIE) systems using RelVis, a toolkit for benchmarking Open Information Extraction systems. Our comprehensive benchmark contains three data sets from the news domain and one data set from Wikipedia with overall 4522 labeled sentences and 11243 binary or n-ary OIE relations. In our analysis on these data sets we compared the performance of four popular OIE systems, ClausIE, OpenIE 4.2, Stanford OpenIE and PredPatt. In addition, we evaluated the impact of five common error classes on a subset of 749 n-ary tuples. From our deep analysis we unreveal important research directions for a next generation on OIE systems.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="schneider-etal-2017-analysing">
<titleInfo>
<title>Analysing Errors of Open Information Extraction Systems</title>
</titleInfo>
<name type="personal">
<namePart type="given">Rudolf</namePart>
<namePart type="family">Schneider</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tom</namePart>
<namePart type="family">Oberhauser</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tobias</namePart>
<namePart type="family">Klatt</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Felix</namePart>
<namePart type="given">A</namePart>
<namePart type="family">Gers</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alexander</namePart>
<namePart type="family">Löser</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2017-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First Workshop on Building Linguistically Generalizable NLP Systems</title>
</titleInfo>
<name type="personal">
<namePart type="given">Emily</namePart>
<namePart type="family">Bender</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hal</namePart>
<namePart type="family">Daumé III</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Allyson</namePart>
<namePart type="family">Ettinger</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sudha</namePart>
<namePart type="family">Rao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Copenhagen, Denmark</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We report results on benchmarking Open Information Extraction (OIE) systems using RelVis, a toolkit for benchmarking Open Information Extraction systems. Our comprehensive benchmark contains three data sets from the news domain and one data set from Wikipedia with overall 4522 labeled sentences and 11243 binary or n-ary OIE relations. In our analysis on these data sets we compared the performance of four popular OIE systems, ClausIE, OpenIE 4.2, Stanford OpenIE and PredPatt. In addition, we evaluated the impact of five common error classes on a subset of 749 n-ary tuples. From our deep analysis we unreveal important research directions for a next generation on OIE systems.</abstract>
<identifier type="citekey">schneider-etal-2017-analysing</identifier>
<identifier type="doi">10.18653/v1/W17-5402</identifier>
<location>
<url>https://aclanthology.org/W17-5402</url>
</location>
<part>
<date>2017-09</date>
<extent unit="page">
<start>11</start>
<end>18</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Analysing Errors of Open Information Extraction Systems
%A Schneider, Rudolf
%A Oberhauser, Tom
%A Klatt, Tobias
%A Gers, Felix A.
%A Löser, Alexander
%Y Bender, Emily
%Y Daumé III, Hal
%Y Ettinger, Allyson
%Y Rao, Sudha
%S Proceedings of the First Workshop on Building Linguistically Generalizable NLP Systems
%D 2017
%8 September
%I Association for Computational Linguistics
%C Copenhagen, Denmark
%F schneider-etal-2017-analysing
%X We report results on benchmarking Open Information Extraction (OIE) systems using RelVis, a toolkit for benchmarking Open Information Extraction systems. Our comprehensive benchmark contains three data sets from the news domain and one data set from Wikipedia with overall 4522 labeled sentences and 11243 binary or n-ary OIE relations. In our analysis on these data sets we compared the performance of four popular OIE systems, ClausIE, OpenIE 4.2, Stanford OpenIE and PredPatt. In addition, we evaluated the impact of five common error classes on a subset of 749 n-ary tuples. From our deep analysis we unreveal important research directions for a next generation on OIE systems.
%R 10.18653/v1/W17-5402
%U https://aclanthology.org/W17-5402
%U https://doi.org/10.18653/v1/W17-5402
%P 11-18
Markdown (Informal)
[Analysing Errors of Open Information Extraction Systems](https://aclanthology.org/W17-5402) (Schneider et al., 2017)
ACL
- Rudolf Schneider, Tom Oberhauser, Tobias Klatt, Felix A. Gers, and Alexander Löser. 2017. Analysing Errors of Open Information Extraction Systems. In Proceedings of the First Workshop on Building Linguistically Generalizable NLP Systems, pages 11–18, Copenhagen, Denmark. Association for Computational Linguistics.