@inproceedings{basu-etal-2025-suwmit,
title = "{SUWMIT} at {B}io{L}ay{S}umm2025: Instruction-based Summarization with Contrastive Decoding",
author = "Basu, Priyam and
Cols, Jose and
Jarvis, Daniel and
Park, Yongsin and
Rodabaugh, Daniel",
editor = "Soni, Sarvesh and
Demner-Fushman, Dina",
booktitle = "Proceedings of the 24th Workshop on Biomedical Language Processing (Shared Tasks)",
month = aug,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.bionlp-share.29/",
doi = "10.18653/v1/2025.bionlp-share.29",
pages = "240--248",
ISBN = "979-8-89176-276-3",
abstract = "In the following paper, we present our team{'}s approach to subtask 1.1 of the BioLaySumm 2025 shared task, which entails the automated generation of lay summaries from biomedical articles. To this end, we experiment with a variety of methods for text preprocessing, extractive summarization, model fine-tuning, and abstractive summarization. Our final results are generated on a fine-tuned Llama 3.1 Instruct (8B) model, notably achieving top scores on two out of four relevance metrics, as well as the highest overall ranking among this year{'}s participating teams on the plain lay summarization subtask."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="basu-etal-2025-suwmit">
<titleInfo>
<title>SUWMIT at BioLaySumm2025: Instruction-based Summarization with Contrastive Decoding</title>
</titleInfo>
<name type="personal">
<namePart type="given">Priyam</namePart>
<namePart type="family">Basu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jose</namePart>
<namePart type="family">Cols</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Daniel</namePart>
<namePart type="family">Jarvis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yongsin</namePart>
<namePart type="family">Park</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Daniel</namePart>
<namePart type="family">Rodabaugh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 24th Workshop on Biomedical Language Processing (Shared Tasks)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sarvesh</namePart>
<namePart type="family">Soni</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dina</namePart>
<namePart type="family">Demner-Fushman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-276-3</identifier>
</relatedItem>
<abstract>In the following paper, we present our team’s approach to subtask 1.1 of the BioLaySumm 2025 shared task, which entails the automated generation of lay summaries from biomedical articles. To this end, we experiment with a variety of methods for text preprocessing, extractive summarization, model fine-tuning, and abstractive summarization. Our final results are generated on a fine-tuned Llama 3.1 Instruct (8B) model, notably achieving top scores on two out of four relevance metrics, as well as the highest overall ranking among this year’s participating teams on the plain lay summarization subtask.</abstract>
<identifier type="citekey">basu-etal-2025-suwmit</identifier>
<identifier type="doi">10.18653/v1/2025.bionlp-share.29</identifier>
<location>
<url>https://aclanthology.org/2025.bionlp-share.29/</url>
</location>
<part>
<date>2025-08</date>
<extent unit="page">
<start>240</start>
<end>248</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T SUWMIT at BioLaySumm2025: Instruction-based Summarization with Contrastive Decoding
%A Basu, Priyam
%A Cols, Jose
%A Jarvis, Daniel
%A Park, Yongsin
%A Rodabaugh, Daniel
%Y Soni, Sarvesh
%Y Demner-Fushman, Dina
%S Proceedings of the 24th Workshop on Biomedical Language Processing (Shared Tasks)
%D 2025
%8 August
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-276-3
%F basu-etal-2025-suwmit
%X In the following paper, we present our team’s approach to subtask 1.1 of the BioLaySumm 2025 shared task, which entails the automated generation of lay summaries from biomedical articles. To this end, we experiment with a variety of methods for text preprocessing, extractive summarization, model fine-tuning, and abstractive summarization. Our final results are generated on a fine-tuned Llama 3.1 Instruct (8B) model, notably achieving top scores on two out of four relevance metrics, as well as the highest overall ranking among this year’s participating teams on the plain lay summarization subtask.
%R 10.18653/v1/2025.bionlp-share.29
%U https://aclanthology.org/2025.bionlp-share.29/
%U https://doi.org/10.18653/v1/2025.bionlp-share.29
%P 240-248
Markdown (Informal)
[SUWMIT at BioLaySumm2025: Instruction-based Summarization with Contrastive Decoding](https://aclanthology.org/2025.bionlp-share.29/) (Basu et al., BioNLP 2025)
ACL