@inproceedings{delbrouck-etal-2021-qiai,
    title = "{QIAI} at {MEDIQA} 2021: Multimodal Radiology Report Summarization",
    author = "Delbrouck, Jean-Benoit and
      Zhang, Cassie and
      Rubin, Daniel",
    editor = "Demner-Fushman, Dina and
      Cohen, Kevin Bretonnel and
      Ananiadou, Sophia and
      Tsujii, Junichi",
    booktitle = "Proceedings of the 20th Workshop on Biomedical Language Processing",
    month = jun,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.bionlp-1.33",
    doi = "10.18653/v1/2021.bionlp-1.33",
    pages = "285--290",
    abstract = "This paper describes the solution submitted by the QIAI lab to the Radiology Report Summarization (RRS) challenge at MEDIQA 2021. It investigates whether using multimodality during training improves the summarization performance of the model at test time. Our preliminary results show that taking advantage of the visual features from the x-rays associated with the radiology reports leads to higher evaluation metrics compared to a text-only baseline system. These improvements are reported in terms of the automatic evaluation metrics METEOR, BLEU, and ROUGE. Our experiments can be fully replicated at the following address: \url{https://github.com/jbdel/vilmedic}.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="delbrouck-etal-2021-qiai">
    <titleInfo>
      <title>QIAI at MEDIQA 2021: Multimodal Radiology Report Summarization</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Jean-Benoit</namePart>
      <namePart type="family">Delbrouck</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Cassie</namePart>
      <namePart type="family">Zhang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Daniel</namePart>
      <namePart type="family">Rubin</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-06</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 20th Workshop on Biomedical Language Processing</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Dina</namePart>
        <namePart type="family">Demner-Fushman</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Kevin</namePart>
        <namePart type="given">Bretonnel</namePart>
        <namePart type="family">Cohen</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Sophia</namePart>
        <namePart type="family">Ananiadou</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Junichi</namePart>
        <namePart type="family">Tsujii</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>This paper describes the solution submitted by the QIAI lab to the Radiology Report Summarization (RRS) challenge at MEDIQA 2021. It investigates whether using multimodality during training improves the summarization performance of the model at test time. Our preliminary results show that taking advantage of the visual features from the x-rays associated with the radiology reports leads to higher evaluation metrics compared to a text-only baseline system. These improvements are reported in terms of the automatic evaluation metrics METEOR, BLEU, and ROUGE. Our experiments can be fully replicated at the following address: https://github.com/jbdel/vilmedic.</abstract>
<identifier type="citekey">delbrouck-etal-2021-qiai</identifier>
<identifier type="doi">10.18653/v1/2021.bionlp-1.33</identifier>
<location>
<url>https://aclanthology.org/2021.bionlp-1.33</url>
</location>
<part>
<date>2021-06</date>
<extent unit="page">
<start>285</start>
<end>290</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T QIAI at MEDIQA 2021: Multimodal Radiology Report Summarization
%A Delbrouck, Jean-Benoit
%A Zhang, Cassie
%A Rubin, Daniel
%Y Demner-Fushman, Dina
%Y Cohen, Kevin Bretonnel
%Y Ananiadou, Sophia
%Y Tsujii, Junichi
%S Proceedings of the 20th Workshop on Biomedical Language Processing
%D 2021
%8 June
%I Association for Computational Linguistics
%C Online
%F delbrouck-etal-2021-qiai
%X This paper describes the solution submitted by the QIAI lab to the Radiology Report Summarization (RRS) challenge at MEDIQA 2021. It investigates whether using multimodality during training improves the summarization performance of the model at test time. Our preliminary results show that taking advantage of the visual features from the x-rays associated with the radiology reports leads to higher evaluation metrics compared to a text-only baseline system. These improvements are reported in terms of the automatic evaluation metrics METEOR, BLEU, and ROUGE. Our experiments can be fully replicated at the following address: https://github.com/jbdel/vilmedic.
%R 10.18653/v1/2021.bionlp-1.33
%U https://aclanthology.org/2021.bionlp-1.33
%U https://doi.org/10.18653/v1/2021.bionlp-1.33
%P 285-290
Markdown (Informal)
[QIAI at MEDIQA 2021: Multimodal Radiology Report Summarization](https://aclanthology.org/2021.bionlp-1.33) (Delbrouck et al., BioNLP 2021)
ACL
Jean-Benoit Delbrouck, Cassie Zhang, and Daniel Rubin. 2021. [QIAI at MEDIQA 2021: Multimodal Radiology Report Summarization](https://aclanthology.org/2021.bionlp-1.33). In *Proceedings of the 20th Workshop on Biomedical Language Processing*, pages 285–290, Online. Association for Computational Linguistics.