@inproceedings{moller-etal-2020-covid,
title = "{COVID-QA}: A Question Answering Dataset for {COVID}-19",
author = {M{\"o}ller, Timo and
Reina, Anthony and
Jayakumar, Raghavan and
Pietsch, Malte},
editor = "Verspoor, Karin and
Cohen, Kevin Bretonnel and
Dredze, Mark and
Ferrara, Emilio and
May, Jonathan and
Munro, Robert and
Paris, Cecile and
Wallace, Byron",
booktitle = "Proceedings of the 1st Workshop on {NLP} for {COVID-19} at {ACL} 2020",
month = jul,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.nlpcovid19-acl.18",
abstract = "We present COVID-QA, a Question Answering dataset consisting of 2,019 question/answer pairs annotated by volunteer biomedical experts on scientific articles related to COVID-19. To evaluate the dataset we compared a RoBERTa base model fine-tuned on SQuAD with the same model trained on SQuAD and our COVID-QA dataset. We found that the additional training on this domain-specific data leads to significant gains in performance. Both the trained model and the annotated dataset have been open-sourced at: \url{https://github.com/deepset-ai/COVID-QA}",
}
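The abstract describes evaluating a RoBERTa base model fine-tuned on SQuAD against the same model additionally trained on COVID-QA. A minimal sketch of probing such a SQuAD-style QA model on the released annotations is shown below. It assumes the open-sourced file follows the SQuAD JSON layout and uses the checkpoint name `deepset/roberta-base-squad2` purely for illustration; neither detail is taken from the paper itself.

```python
# Hedged sketch, not the authors' exact pipeline: run a SQuAD-fine-tuned
# RoBERTa model over a few COVID-QA question/context pairs and compare
# predictions with the annotated answers.
import json

from transformers import pipeline

# Assumption: the released annotations use the SQuAD JSON layout
# (data -> paragraphs -> context / qas).
with open("COVID-QA.json", encoding="utf-8") as f:
    covid_qa = json.load(f)

qa = pipeline(
    "question-answering",
    model="deepset/roberta-base-squad2",  # assumed checkpoint; any SQuAD-style QA model works
)

article = covid_qa["data"][0]["paragraphs"][0]
for item in article["qas"][:3]:
    pred = qa(question=item["question"], context=article["context"])
    gold = item["answers"][0]["text"] if item["answers"] else ""
    print(item["question"])
    print("  predicted:", pred["answer"])
    print("  annotated:", gold[:80])
```

For the comparison reported in the abstract, one would additionally fine-tune the same checkpoint on a COVID-QA training split before evaluating, rather than only running zero-shot inference as above.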