BibTeX
@inproceedings{rondeau-hazen-2018-systematic,
title = "Systematic Error Analysis of the {S}tanford Question Answering Dataset",
author = "Rondeau, Marc-Antoine and
Hazen, T. J.",
editor = "Choi, Eunsol and
Seo, Minjoon and
Chen, Danqi and
Jia, Robin and
Berant, Jonathan",
booktitle = "Proceedings of the Workshop on Machine Reading for Question Answering",
month = jul,
year = "2018",
address = "Melbourne, Australia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W18-2602",
doi = "10.18653/v1/W18-2602",
pages = "12--20",
abstract = "We analyzed the outputs of multiple question answering (QA) models applied to the Stanford Question Answering Dataset (SQuAD) to identify the core challenges for QA systems on this data set. Through an iterative process, challenging aspects were hypothesized through qualitative analysis of the common error cases. A classifier was then constructed to predict whether SQuAD test examples were likely to be difficult for systems to answer based on features associated with the hypothesized aspects. The classifier{'}s performance was used to accept or reject each aspect as an indicator of difficulty. With this approach, we ensured that our hypotheses were systematically tested and not simply accepted based on our pre-existing biases. Our explanations are not accepted based on human evaluation of individual examples. This process also enabled us to identify the primary QA strategy learned by the models, i.e., systems determined the acceptable answer type for a question and then selected the acceptable answer span of that type containing the highest density of words present in the question within its local vicinity in the passage.",
}
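The same record follows in MODS XML and Endnote form. For working with the BibTeX entry programmatically, here is a minimal standard-library sketch, not a full BibTeX parser: it assumes double-quoted field values plus bare month macros such as `jul`, and the filename is a placeholder for wherever the entry above is saved.

```python
# Minimal field extraction for a single flat BibTeX entry like the one
# above; not a general parser (no nested braces, no @string handling).
import re

def parse_bibtex_fields(entry: str) -> dict:
    # key = "value" pairs; values may span lines (e.g. the author field).
    fields = dict(re.findall(r'(\w+)\s*=\s*"((?:[^"\\]|\\.)*)"', entry))
    # Unquoted month macros, e.g. `month = jul,`.
    for key, value in re.findall(r"(\w+)\s*=\s*([a-z]+)\s*,", entry):
        fields.setdefault(key, value)
    return fields

# Placeholder filename: wherever the entry above was saved.
entry = open("rondeau-hazen-2018-systematic.bib").read()
fields = parse_bibtex_fields(entry)
print(fields["title"])  # Systematic Error Analysis of the {S}tanford ...
print(fields["pages"])  # 12--20
```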
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="rondeau-hazen-2018-systematic">
<titleInfo>
<title>Systematic Error Analysis of the Stanford Question Answering Dataset</title>
</titleInfo>
<name type="personal">
<namePart type="given">Marc-Antoine</namePart>
<namePart type="family">Rondeau</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">T</namePart>
<namePart type="given">J</namePart>
<namePart type="family">Hazen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Workshop on Machine Reading for Question Answering</title>
</titleInfo>
<name type="personal">
<namePart type="given">Eunsol</namePart>
<namePart type="family">Choi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Minjoon</namePart>
<namePart type="family">Seo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Danqi</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Robin</namePart>
<namePart type="family">Jia</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jonathan</namePart>
<namePart type="family">Berant</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Melbourne, Australia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We analyzed the outputs of multiple question answering (QA) models applied to the Stanford Question Answering Dataset (SQuAD) to identify the core challenges for QA systems on this data set. Through an iterative process, challenging aspects were hypothesized through qualitative analysis of the common error cases. A classifier was then constructed to predict whether SQuAD test examples were likely to be difficult for systems to answer based on features associated with the hypothesized aspects. The classifier’s performance was used to accept or reject each aspect as an indicator of difficulty. With this approach, we ensured that our hypotheses were systematically tested and not simply accepted based on our pre-existing biases. Our explanations were not simply accepted based on human evaluation of individual examples. This process also enabled us to identify the primary QA strategy learned by the models, i.e., systems determined the acceptable answer type for a question and then selected the acceptable answer span of that type containing the highest density of words present in the question within its local vicinity in the passage.</abstract>
<identifier type="citekey">rondeau-hazen-2018-systematic</identifier>
<identifier type="doi">10.18653/v1/W18-2602</identifier>
<location>
<url>https://aclanthology.org/W18-2602</url>
</location>
<part>
<date>2018-07</date>
<extent unit="page">
<start>12</start>
<end>20</end>
</extent>
</part>
</mods>
</modsCollection>
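A matching sketch for reading the MODS record, again standard library only; the `m` prefix is an arbitrary binding for the MODS v3 namespace, and the filename is a placeholder.

```python
# Read title, authors, and DOI from the MODS record above.
import xml.etree.ElementTree as ET

NS = {"m": "http://www.loc.gov/mods/v3"}

tree = ET.parse("rondeau-hazen-2018-systematic.xml")  # placeholder path
mods = tree.getroot().find("m:mods", NS)

title = mods.find("m:titleInfo/m:title", NS).text
# Direct children <name> of <mods> are the authors; the editors sit
# under <relatedItem> and are not matched by this non-recursive path.
authors = [
    " ".join(part.text for part in name.findall("m:namePart", NS))
    for name in mods.findall("m:name", NS)
]
doi = mods.find("m:identifier[@type='doi']", NS).text
print(title, authors, doi)
```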
Endnote
%0 Conference Proceedings
%T Systematic Error Analysis of the Stanford Question Answering Dataset
%A Rondeau, Marc-Antoine
%A Hazen, T. J.
%Y Choi, Eunsol
%Y Seo, Minjoon
%Y Chen, Danqi
%Y Jia, Robin
%Y Berant, Jonathan
%S Proceedings of the Workshop on Machine Reading for Question Answering
%D 2018
%8 July
%I Association for Computational Linguistics
%C Melbourne, Australia
%F rondeau-hazen-2018-systematic
%X We analyzed the outputs of multiple question answering (QA) models applied to the Stanford Question Answering Dataset (SQuAD) to identify the core challenges for QA systems on this data set. Through an iterative process, challenging aspects were hypothesized through qualitative analysis of the common error cases. A classifier was then constructed to predict whether SQuAD test examples were likely to be difficult for systems to answer based on features associated with the hypothesized aspects. The classifier’s performance was used to accept or reject each aspect as an indicator of difficulty. With this approach, we ensured that our hypotheses were systematically tested and not simply accepted based on our pre-existing biases. Our explanations were not simply accepted based on human evaluation of individual examples. This process also enabled us to identify the primary QA strategy learned by the models, i.e., systems determined the acceptable answer type for a question and then selected the acceptable answer span of that type containing the highest density of words present in the question within its local vicinity in the passage.
%R 10.18653/v1/W18-2602
%U https://aclanthology.org/W18-2602
%U https://doi.org/10.18653/v1/W18-2602
%P 12-20
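The %-tagged block above is the Endnote (refer) export. Tags such as %A, %Y, and %U legitimately repeat, so a dict of lists is the natural shape; again a minimal sketch with a placeholder filename.

```python
# Fold a refer/Endnote record into {tag: [values]}.
from collections import defaultdict

def parse_refer(text: str) -> dict:
    record = defaultdict(list)
    for line in text.splitlines():
        # Lines look like `%A Rondeau, Marc-Antoine`: %, tag, space, value.
        if line.startswith("%") and len(line) > 2:
            record[line[1]].append(line[3:])
    return dict(record)

rec = parse_refer(open("rondeau-hazen-2018-systematic.enw").read())
print(rec["T"][0])  # title
print(rec["A"])     # ['Rondeau, Marc-Antoine', 'Hazen, T. J.']
```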
Markdown (Informal)
[Systematic Error Analysis of the Stanford Question Answering Dataset](https://aclanthology.org/W18-2602) (Rondeau & Hazen, ACL 2018)
ACL
Marc-Antoine Rondeau and T. J. Hazen. 2018. Systematic Error Analysis of the Stanford Question Answering Dataset. In Proceedings of the Workshop on Machine Reading for Question Answering, pages 12–20, Melbourne, Australia. Association for Computational Linguistics.
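Beyond the bibliographic records themselves, the abstract describes a concrete loop: hypothesize a difficulty aspect, derive features for it, and accept the aspect only if a classifier built on those features can predict system errors. Below is a toy illustration of that accept/reject step, not the authors' implementation; the features, labels, and threshold are all hypothetical placeholders.

```python
# Toy version of the abstract's test: does a classifier over features for
# one hypothesized aspect predict whether a QA system errs on an example?
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(0)
n = 1000
# Hypothetical per-example features for one candidate aspect.
X = rng.normal(size=(n, 3))
# 1 = the QA system answered this example incorrectly (synthetic labels).
y = (X[:, 0] + 0.5 * rng.normal(size=n) > 0).astype(int)

auc = cross_val_score(LogisticRegression(), X, y, cv=5,
                      scoring="roc_auc").mean()
# Keep the aspect only if it beats chance by a margin (the threshold
# here is illustrative, not from the paper).
verdict = "accepted" if auc > 0.6 else "rejected"
print(f"mean AUC = {auc:.2f}; aspect {verdict}")
```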