@inproceedings{ahuir-esteve-etal-2023-elirf,
title = "{EL}i{RF}-{VRAIN} at {B}io{NLP} Task 1{B}: Radiology Report Summarization",
author = "Ahuir Esteve, Vicent and
Segarra, Encarna and
Hurtado, Lluis",
editor = "Demner-fushman, Dina and
Ananiadou, Sophia and
Cohen, Kevin",
booktitle = "The 22nd Workshop on Biomedical Natural Language Processing and BioNLP Shared Tasks",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.bionlp-1.52",
doi = "10.18653/v1/2023.bionlp-1.52",
pages = "524--529",
abstract = "This paper presents our system at the Radiology Report Summarization Shared Task-1B of the 22nd BioNLP Workshop 2023. Inspired by the work of the BioBART model, we continuously pre-trained a general domain BART model with biomedical data to adapt it to this specific domain. In the pre-training phase, several pre-training tasks are aggregated to inject linguistic knowledge and increase the abstractivity of the generated summaries. We present the results of our models, and also, we have carried out an additional study on the lengths of the generated summaries, which has provided us with interesting information.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ahuir-esteve-etal-2023-elirf">
<titleInfo>
<title>ELiRF-VRAIN at BioNLP Task 1B: Radiology Report Summarization</title>
</titleInfo>
<name type="personal">
<namePart type="given">Vicent</namePart>
<namePart type="family">Ahuir Esteve</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Encarna</namePart>
<namePart type="family">Segarra</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lluis</namePart>
<namePart type="family">Hurtado</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>The 22nd Workshop on Biomedical Natural Language Processing and BioNLP Shared Tasks</title>
</titleInfo>
<name type="personal">
<namePart type="given">Dina</namePart>
<namePart type="family">Demner-fushman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sophia</namePart>
<namePart type="family">Ananiadou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kevin</namePart>
<namePart type="family">Cohen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Toronto, Canada</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper presents our system for the Radiology Report Summarization Shared Task 1B of the 22nd BioNLP Workshop 2023. Inspired by BioBART, we continued pre-training a general-domain BART model on biomedical data to adapt it to this specific domain. During pre-training, several tasks are combined to inject linguistic knowledge and increase the abstractiveness of the generated summaries. We present the results of our models, together with an additional study of the lengths of the generated summaries, which provided interesting insights.</abstract>
<identifier type="citekey">ahuir-esteve-etal-2023-elirf</identifier>
<identifier type="doi">10.18653/v1/2023.bionlp-1.52</identifier>
<location>
<url>https://aclanthology.org/2023.bionlp-1.52</url>
</location>
<part>
<date>2023-07</date>
<extent unit="page">
<start>524</start>
<end>529</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T ELiRF-VRAIN at BioNLP Task 1B: Radiology Report Summarization
%A Ahuir Esteve, Vicent
%A Segarra, Encarna
%A Hurtado, Lluís
%Y Demner-Fushman, Dina
%Y Ananiadou, Sophia
%Y Cohen, Kevin
%S The 22nd Workshop on Biomedical Natural Language Processing and BioNLP Shared Tasks
%D 2023
%8 July
%I Association for Computational Linguistics
%C Toronto, Canada
%F ahuir-esteve-etal-2023-elirf
%X This paper presents our system for the Radiology Report Summarization Shared Task 1B of the 22nd BioNLP Workshop 2023. Inspired by BioBART, we continued pre-training a general-domain BART model on biomedical data to adapt it to this specific domain. During pre-training, several tasks are combined to inject linguistic knowledge and increase the abstractiveness of the generated summaries. We present the results of our models, together with an additional study of the lengths of the generated summaries, which provided interesting insights.
%R 10.18653/v1/2023.bionlp-1.52
%U https://aclanthology.org/2023.bionlp-1.52
%U https://doi.org/10.18653/v1/2023.bionlp-1.52
%P 524-529
Markdown (Informal)
[ELiRF-VRAIN at BioNLP Task 1B: Radiology Report Summarization](https://aclanthology.org/2023.bionlp-1.52) (Ahuir Esteve et al., BioNLP 2023)
ACL
Vicent Ahuir Esteve, Encarna Segarra, and Lluís Hurtado. 2023. ELiRF-VRAIN at BioNLP Task 1B: Radiology Report Summarization. In The 22nd Workshop on Biomedical Natural Language Processing and BioNLP Shared Tasks, pages 524–529, Toronto, Canada. Association for Computational Linguistics.