@inproceedings{naznin-etal-2025-cstrl,
title = "{CSTRL}: Context-Driven Sequential Transfer Learning for Abstractive Radiology Report Summarization",
author = "Naznin, Mst. Fahmida Sultana and
Faruq, Adnan Ibney and
Tazwar, Mostafa Rifat and
Jobayer, Md and
Shawon, Md. Mehedi Hasan and
Hasan, Md Rakibul",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-acl.1360/",
doi = "10.18653/v1/2025.findings-acl.1360",
pages = "26526--26537",
ISBN = "979-8-89176-256-5",
abstract = "A radiology report comprises several sections, including the Findings and Impression of the diagnosis. Automatically generating the Impression from the Findings is crucial for reducing radiologists' workload and improving diagnostic accuracy. Pretrained models that excel in common abstractive summarization problems encounter challenges when applied to specialized medical domains largely due to the complex terminology and the necessity for accurate clinical context. Such tasks in medical domains demand extracting core information, avoiding context shifts, and maintaining proper flow. Misuse of medical terms can lead to drastic clinical errors. To address these issues, we introduce a sequential transfer learning that ensures key content extraction and coherent summarization. Sequential transfer learning often faces challenges like initial parameter decay and knowledge loss, which we resolve with the Fisher matrix regularization. Using MIMIC-CXR and Open-I datasets, our model, CSTRL {---} Context-driven Sequential TRansfer Learning {---} achieved state-of-the-art performance, showing 56.2{\%} improvement in BLEU-1, 40.5{\%} in BLEU-2, 84.3{\%} in BLEU-3, 28.9{\%} in ROUGE-1, 41.0{\%} in ROUGE-2 and 26.5{\%} in ROGUE-3 score over benchmark studies. We also analyze factual consistency scores while preserving the medical context. Our code is publicly available at https://github.com/fahmidahossain/Report{\_}Summarization."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="naznin-etal-2025-cstrl">
<titleInfo>
<title>CSTRL: Context-Driven Sequential Transfer Learning for Abstractive Radiology Report Summarization</title>
</titleInfo>
<name type="personal">
<namePart type="given">Mst.</namePart>
<namePart type="given">Fahmida</namePart>
<namePart type="given">Sultana</namePart>
<namePart type="family">Naznin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Adnan</namePart>
<namePart type="given">Ibney</namePart>
<namePart type="family">Faruq</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mostafa</namePart>
<namePart type="given">Rifat</namePart>
<namePart type="family">Tazwar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Md</namePart>
<namePart type="family">Jobayer</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Md.</namePart>
<namePart type="given">Mehedi</namePart>
<namePart type="given">Hasan</namePart>
<namePart type="family">Shawon</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Md</namePart>
<namePart type="given">Rakibul</namePart>
<namePart type="family">Hasan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: ACL 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Wanxiang</namePart>
<namePart type="family">Che</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Joyce</namePart>
<namePart type="family">Nabende</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ekaterina</namePart>
<namePart type="family">Shutova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohammad</namePart>
<namePart type="given">Taher</namePart>
<namePart type="family">Pilehvar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-256-5</identifier>
</relatedItem>
<abstract>A radiology report comprises several sections, including the Findings and Impression of the diagnosis. Automatically generating the Impression from the Findings is crucial for reducing radiologists’ workload and improving diagnostic accuracy. Pretrained models that excel in common abstractive summarization problems encounter challenges when applied to specialized medical domains, largely due to the complex terminology and the necessity for accurate clinical context. Such tasks in medical domains demand extracting core information, avoiding context shifts, and maintaining proper flow. Misuse of medical terms can lead to drastic clinical errors. To address these issues, we introduce a sequential transfer learning framework that ensures key content extraction and coherent summarization. Sequential transfer learning often faces challenges like initial parameter decay and knowledge loss, which we resolve with Fisher matrix regularization. Using the MIMIC-CXR and Open-I datasets, our model, CSTRL — Context-driven Sequential TRansfer Learning — achieved state-of-the-art performance, showing a 56.2% improvement in BLEU-1, 40.5% in BLEU-2, 84.3% in BLEU-3, 28.9% in ROUGE-1, 41.0% in ROUGE-2 and 26.5% in ROUGE-3 score over benchmark studies. We also analyze factual consistency scores while preserving the medical context. Our code is publicly available at https://github.com/fahmidahossain/Report_Summarization.</abstract>
<identifier type="citekey">naznin-etal-2025-cstrl</identifier>
<identifier type="doi">10.18653/v1/2025.findings-acl.1360</identifier>
<location>
<url>https://aclanthology.org/2025.findings-acl.1360/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>26526</start>
<end>26537</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T CSTRL: Context-Driven Sequential Transfer Learning for Abstractive Radiology Report Summarization
%A Naznin, Mst. Fahmida Sultana
%A Faruq, Adnan Ibney
%A Tazwar, Mostafa Rifat
%A Jobayer, Md
%A Shawon, Md. Mehedi Hasan
%A Hasan, Md Rakibul
%Y Che, Wanxiang
%Y Nabende, Joyce
%Y Shutova, Ekaterina
%Y Pilehvar, Mohammad Taher
%S Findings of the Association for Computational Linguistics: ACL 2025
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-256-5
%F naznin-etal-2025-cstrl
%X A radiology report comprises several sections, including the Findings and Impression of the diagnosis. Automatically generating the Impression from the Findings is crucial for reducing radiologists’ workload and improving diagnostic accuracy. Pretrained models that excel in common abstractive summarization problems encounter challenges when applied to specialized medical domains, largely due to the complex terminology and the necessity for accurate clinical context. Such tasks in medical domains demand extracting core information, avoiding context shifts, and maintaining proper flow. Misuse of medical terms can lead to drastic clinical errors. To address these issues, we introduce a sequential transfer learning framework that ensures key content extraction and coherent summarization. Sequential transfer learning often faces challenges like initial parameter decay and knowledge loss, which we resolve with Fisher matrix regularization. Using the MIMIC-CXR and Open-I datasets, our model, CSTRL — Context-driven Sequential TRansfer Learning — achieved state-of-the-art performance, showing a 56.2% improvement in BLEU-1, 40.5% in BLEU-2, 84.3% in BLEU-3, 28.9% in ROUGE-1, 41.0% in ROUGE-2 and 26.5% in ROUGE-3 score over benchmark studies. We also analyze factual consistency scores while preserving the medical context. Our code is publicly available at https://github.com/fahmidahossain/Report_Summarization.
%R 10.18653/v1/2025.findings-acl.1360
%U https://aclanthology.org/2025.findings-acl.1360/
%U https://doi.org/10.18653/v1/2025.findings-acl.1360
%P 26526-26537
Markdown (Informal)
[CSTRL: Context-Driven Sequential Transfer Learning for Abstractive Radiology Report Summarization](https://aclanthology.org/2025.findings-acl.1360/) (Naznin et al., Findings 2025)