@inproceedings{kondadadi-etal-2021-optum,
title = "Optum at {MEDIQA} 2021: Abstractive Summarization of Radiology Reports using simple {BART} Finetuning",
author = "Kondadadi, Ravi and
Manchanda, Sahil and
Ngo, Jason and
McCormack, Ronan",
editor = "Demner-Fushman, Dina and
Cohen, Kevin Bretonnel and
Ananiadou, Sophia and
Tsujii, Junichi",
booktitle = "Proceedings of the 20th Workshop on Biomedical Language Processing",
month = jun,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.bionlp-1.32",
doi = "10.18653/v1/2021.bionlp-1.32",
pages = "280--284",
abstract = "This paper describes experiments undertaken and their results as part of the BioNLP MEDIQA 2021 challenge. We participated in Task 3: Radiology Report Summarization. Multiple runs were submitted for evaluation, from solutions leveraging transfer learning from pre-trained transformer models, which were then fine tuned on a subset of MIMIC-CXR, for abstractive report summarization. The task was evaluated using ROUGE and our best performing system obtained a ROUGE-2 score of 0.392.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kondadadi-etal-2021-optum">
<titleInfo>
<title>Optum at MEDIQA 2021: Abstractive Summarization of Radiology Reports using simple BART Finetuning</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ravi</namePart>
<namePart type="family">Kondadadi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sahil</namePart>
<namePart type="family">Manchanda</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jason</namePart>
<namePart type="family">Ngo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ronan</namePart>
<namePart type="family">McCormack</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 20th Workshop on Biomedical Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Dina</namePart>
<namePart type="family">Demner-Fushman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kevin</namePart>
<namePart type="given">Bretonnel</namePart>
<namePart type="family">Cohen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sophia</namePart>
<namePart type="family">Ananiadou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Junichi</namePart>
<namePart type="family">Tsujii</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
    <abstract>This paper describes experiments undertaken and their results as part of the BioNLP MEDIQA 2021 challenge. We participated in Task 3: Radiology Report Summarization. Multiple runs were submitted for evaluation, based on solutions that leverage transfer learning from pre-trained transformer models, which were then fine-tuned on a subset of MIMIC-CXR for abstractive report summarization. The task was evaluated using ROUGE, and our best-performing system obtained a ROUGE-2 score of 0.392.</abstract>
<identifier type="citekey">kondadadi-etal-2021-optum</identifier>
<identifier type="doi">10.18653/v1/2021.bionlp-1.32</identifier>
<location>
<url>https://aclanthology.org/2021.bionlp-1.32</url>
</location>
<part>
<date>2021-06</date>
<extent unit="page">
<start>280</start>
<end>284</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Optum at MEDIQA 2021: Abstractive Summarization of Radiology Reports using simple BART Finetuning
%A Kondadadi, Ravi
%A Manchanda, Sahil
%A Ngo, Jason
%A McCormack, Ronan
%Y Demner-Fushman, Dina
%Y Cohen, Kevin Bretonnel
%Y Ananiadou, Sophia
%Y Tsujii, Junichi
%S Proceedings of the 20th Workshop on Biomedical Language Processing
%D 2021
%8 June
%I Association for Computational Linguistics
%C Online
%F kondadadi-etal-2021-optum
%X This paper describes experiments undertaken and their results as part of the BioNLP MEDIQA 2021 challenge. We participated in Task 3: Radiology Report Summarization. Multiple runs were submitted for evaluation, based on solutions that leverage transfer learning from pre-trained transformer models, which were then fine-tuned on a subset of MIMIC-CXR for abstractive report summarization. The task was evaluated using ROUGE, and our best-performing system obtained a ROUGE-2 score of 0.392.
%R 10.18653/v1/2021.bionlp-1.32
%U https://aclanthology.org/2021.bionlp-1.32
%U https://doi.org/10.18653/v1/2021.bionlp-1.32
%P 280-284
Markdown (Informal)

[Optum at MEDIQA 2021: Abstractive Summarization of Radiology Reports using simple BART Finetuning](https://aclanthology.org/2021.bionlp-1.32) (Kondadadi et al., BioNLP 2021)

ACL

Ravi Kondadadi, Sahil Manchanda, Jason Ngo, and Ronan McCormack. 2021. Optum at MEDIQA 2021: Abstractive Summarization of Radiology Reports using simple BART Finetuning. In Proceedings of the 20th Workshop on Biomedical Language Processing, pages 280–284, Online. Association for Computational Linguistics.
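
The abstract above describes fine-tuning a pre-trained BART model on a subset of MIMIC-CXR for abstractive radiology report summarization, evaluated with ROUGE. A minimal sketch of that general recipe is shown below, assuming the Hugging Face `transformers` and `datasets` libraries; the checkpoint, file names, column names (`findings`, `impression`), and hyperparameters are illustrative assumptions, not the authors' actual configuration.

```python
# Hedged sketch: fine-tuning BART for abstractive summarization of radiology
# reports. Paths, column names, and hyperparameters are illustrative guesses.
from datasets import load_dataset
from transformers import (
    AutoTokenizer,
    BartForConditionalGeneration,
    DataCollatorForSeq2Seq,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
)

MODEL_NAME = "facebook/bart-large-cnn"  # a generic pre-trained summarization checkpoint
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = BartForConditionalGeneration.from_pretrained(MODEL_NAME)

# Hypothetical CSV files with "findings" (report body) and "impression" (target summary).
dataset = load_dataset("csv", data_files={"train": "train.csv", "validation": "dev.csv"})

def preprocess(batch):
    # Encoder input: the findings section; labels: the impression section.
    model_inputs = tokenizer(batch["findings"], max_length=1024, truncation=True)
    labels = tokenizer(text_target=batch["impression"], max_length=128, truncation=True)
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs

tokenized = dataset.map(
    preprocess, batched=True, remove_columns=dataset["train"].column_names
)

args = Seq2SeqTrainingArguments(
    output_dir="bart-radiology-summarization",
    per_device_train_batch_size=4,
    num_train_epochs=3,
    learning_rate=3e-5,
    predict_with_generate=True,  # generate summaries during evaluation
)

trainer = Seq2SeqTrainer(
    model=model,
    args=args,
    train_dataset=tokenized["train"],
    eval_dataset=tokenized["validation"],
    data_collator=DataCollatorForSeq2Seq(tokenizer, model=model),
)
trainer.train()
```

Generated impressions could then be compared against reference summaries with a ROUGE implementation (for example the `rouge-score` package) to obtain ROUGE-2 figures comparable in kind, though not necessarily in value, to the 0.392 reported above.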