@inproceedings{rawat-etal-2020-entity,
title = "{Entity-Enriched} Neural Models for Clinical {Question Answering}",
author = "Rawat, Bhanu Pratap Singh and
Weng, Wei-Hung and
Min, So Yeon and
Raghavan, Preethi and
Szolovits, Peter",
editor = "Demner-Fushman, Dina and
Cohen, Kevin Bretonnel and
Ananiadou, Sophia and
Tsujii, Junichi",
booktitle = "Proceedings of the 19th SIGBioMed Workshop on Biomedical Language Processing",
month = jul,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.bionlp-1.12",
doi = "10.18653/v1/2020.bionlp-1.12",
pages = "112--122",
abstract = "We explore state-of-the-art neural models for question answering on electronic medical records and improve their ability to generalize better on previously unseen (paraphrased) questions at test time. We enable this by learning to predict logical forms as an auxiliary task along with the main task of answer span detection. The predicted logical forms also serve as a rationale for the answer. Further, we also incorporate medical entity information in these models via the ERNIE architecture. We train our models on the large-scale emrQA dataset and observe that our multi-task entity-enriched models generalize to paraphrased questions {\textasciitilde}5{\%} better than the baseline BERT model.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="rawat-etal-2020-entity">
<titleInfo>
<title>Entity-Enriched Neural Models for Clinical Question Answering</title>
</titleInfo>
<name type="personal">
<namePart type="given">Bhanu</namePart>
<namePart type="given">Pratap</namePart>
<namePart type="given">Singh</namePart>
<namePart type="family">Rawat</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wei-Hung</namePart>
<namePart type="family">Weng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">So</namePart>
<namePart type="given">Yeon</namePart>
<namePart type="family">Min</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Preethi</namePart>
<namePart type="family">Raghavan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Peter</namePart>
<namePart type="family">Szolovits</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 19th SIGBioMed Workshop on Biomedical Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Dina</namePart>
<namePart type="family">Demner-Fushman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kevin</namePart>
<namePart type="given">Bretonnel</namePart>
<namePart type="family">Cohen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sophia</namePart>
<namePart type="family">Ananiadou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Junichi</namePart>
<namePart type="family">Tsujii</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We explore state-of-the-art neural models for question answering on electronic medical records and improve their ability to generalize better on previously unseen (paraphrased) questions at test time. We enable this by learning to predict logical forms as an auxiliary task along with the main task of answer span detection. The predicted logical forms also serve as a rationale for the answer. Further, we also incorporate medical entity information in these models via the ERNIE architecture. We train our models on the large-scale emrQA dataset and observe that our multi-task entity-enriched models generalize to paraphrased questions ~5% better than the baseline BERT model.</abstract>
<identifier type="citekey">rawat-etal-2020-entity</identifier>
<identifier type="doi">10.18653/v1/2020.bionlp-1.12</identifier>
<location>
<url>https://aclanthology.org/2020.bionlp-1.12</url>
</location>
<part>
<date>2020-07</date>
<extent unit="page">
<start>112</start>
<end>122</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Entity-Enriched Neural Models for Clinical Question Answering
%A Rawat, Bhanu Pratap Singh
%A Weng, Wei-Hung
%A Min, So Yeon
%A Raghavan, Preethi
%A Szolovits, Peter
%Y Demner-Fushman, Dina
%Y Cohen, Kevin Bretonnel
%Y Ananiadou, Sophia
%Y Tsujii, Junichi
%S Proceedings of the 19th SIGBioMed Workshop on Biomedical Language Processing
%D 2020
%8 July
%I Association for Computational Linguistics
%C Online
%F rawat-etal-2020-entity
%X We explore state-of-the-art neural models for question answering on electronic medical records and improve their ability to generalize better on previously unseen (paraphrased) questions at test time. We enable this by learning to predict logical forms as an auxiliary task along with the main task of answer span detection. The predicted logical forms also serve as a rationale for the answer. Further, we also incorporate medical entity information in these models via the ERNIE architecture. We train our models on the large-scale emrQA dataset and observe that our multi-task entity-enriched models generalize to paraphrased questions ~5% better than the baseline BERT model.
%R 10.18653/v1/2020.bionlp-1.12
%U https://aclanthology.org/2020.bionlp-1.12
%U https://doi.org/10.18653/v1/2020.bionlp-1.12
%P 112-122
Markdown (Informal)
[Entity-Enriched Neural Models for Clinical Question Answering](https://aclanthology.org/2020.bionlp-1.12) (Rawat et al., BioNLP 2020)
ACL
- Bhanu Pratap Singh Rawat, Wei-Hung Weng, So Yeon Min, Preethi Raghavan, and Peter Szolovits. 2020. Entity-Enriched Neural Models for Clinical Question Answering. In Proceedings of the 19th SIGBioMed Workshop on Biomedical Language Processing, pages 112–122, Online. Association for Computational Linguistics.