@inproceedings{sha-etal-2025-integrating,
title = "Integrating Graph based Algorithm and Transformer Models for Abstractive Summarization",
author = "Sha, Sayed Ayaan Ahmed and
Sivanesan, Sangeetha and
Madasamy, Anand Kumar and
Binu, Navya",
editor = "Modi, Ashutosh and
Ghosh, Saptarshi and
Ekbal, Asif and
Goyal, Pawan and
Jain, Sarika and
Joshi, Abhinav and
Mishra, Shivani and
Datta, Debtanu and
Paul, Shounak and
Singh, Kshetrimayum Boynao and
Kumar, Sandeep",
booktitle = "Proceedings of the 1st Workshop on NLP for Empowering Justice (JUST-NLP 2025)",
month = dec,
year = "2025",
address = "Mumbai, India",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.justnlp-main.21/",
pages = "186--190",
ISBN = "979-8-89176-312-8",
abstract = "Summarizing legal documents is a challenging and critical task in the field of Natural Language Processing(NLP). On top of that generating abstractive summaries for legal judgments poses a significant challenge to researchers as there is limitation in the number of input tokens for various language models. In this paper we experimented with two models namely BART base model finetuned on CNN DailyMail dataset along with TextRank and pegasus{\_}indian{\_}legal, a finetuned version of legal-pegasus on Indian legal judgments for generating abstractive summaries for Indian legal documents as part of the JUSTNLP 2025 - Shared Task on Legal Summarization. BART+TextRank outperformed pegasus{\_}indian{\_}legal with a score of 18.84."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="sha-etal-2025-integrating">
<titleInfo>
<title>Integrating Graph based Algorithm and Transformer Models for Abstractive Summarization</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sayed</namePart>
<namePart type="given">Ayaan</namePart>
<namePart type="given">Ahmed</namePart>
<namePart type="family">Sha</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sangeetha</namePart>
<namePart type="family">Sivanesan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anand</namePart>
<namePart type="given">Kumar</namePart>
<namePart type="family">Madasamy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Navya</namePart>
<namePart type="family">Binu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 1st Workshop on NLP for Empowering Justice (JUST-NLP 2025)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ashutosh</namePart>
<namePart type="family">Modi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Saptarshi</namePart>
<namePart type="family">Ghosh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Asif</namePart>
<namePart type="family">Ekbal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pawan</namePart>
<namePart type="family">Goyal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sarika</namePart>
<namePart type="family">Jain</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Abhinav</namePart>
<namePart type="family">Joshi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shivani</namePart>
<namePart type="family">Mishra</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Debtanu</namePart>
<namePart type="family">Datta</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shounak</namePart>
<namePart type="family">Paul</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kshetrimayum</namePart>
<namePart type="given">Boynao</namePart>
<namePart type="family">Singh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sandeep</namePart>
<namePart type="family">Kumar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Mumbai, India</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-312-8</identifier>
</relatedItem>
<abstract>Summarizing legal documents is a challenging and critical task in Natural Language Processing (NLP). Generating abstractive summaries for legal judgments is especially difficult because many language models limit the number of input tokens they can handle. In this paper, as part of the JUST-NLP 2025 Shared Task on Legal Summarization, we experimented with two models for generating abstractive summaries of Indian legal documents: a BART base model finetuned on the CNN/DailyMail dataset combined with TextRank, and pegasus_indian_legal, a version of legal-pegasus finetuned on Indian legal judgments. BART+TextRank outperformed pegasus_indian_legal with a score of 18.84.</abstract>
<identifier type="citekey">sha-etal-2025-integrating</identifier>
<location>
<url>https://aclanthology.org/2025.justnlp-main.21/</url>
</location>
<part>
<date>2025-12</date>
<extent unit="page">
<start>186</start>
<end>190</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Integrating Graph based Algorithm and Transformer Models for Abstractive Summarization
%A Sha, Sayed Ayaan Ahmed
%A Sivanesan, Sangeetha
%A Madasamy, Anand Kumar
%A Binu, Navya
%Y Modi, Ashutosh
%Y Ghosh, Saptarshi
%Y Ekbal, Asif
%Y Goyal, Pawan
%Y Jain, Sarika
%Y Joshi, Abhinav
%Y Mishra, Shivani
%Y Datta, Debtanu
%Y Paul, Shounak
%Y Singh, Kshetrimayum Boynao
%Y Kumar, Sandeep
%S Proceedings of the 1st Workshop on NLP for Empowering Justice (JUST-NLP 2025)
%D 2025
%8 December
%I Association for Computational Linguistics
%C Mumbai, India
%@ 979-8-89176-312-8
%F sha-etal-2025-integrating
%X Summarizing legal documents is a challenging and critical task in Natural Language Processing (NLP). Generating abstractive summaries for legal judgments is especially difficult because many language models limit the number of input tokens they can handle. In this paper, as part of the JUST-NLP 2025 Shared Task on Legal Summarization, we experimented with two models for generating abstractive summaries of Indian legal documents: a BART base model finetuned on the CNN/DailyMail dataset combined with TextRank, and pegasus_indian_legal, a version of legal-pegasus finetuned on Indian legal judgments. BART+TextRank outperformed pegasus_indian_legal with a score of 18.84.
%U https://aclanthology.org/2025.justnlp-main.21/
%P 186-190
Markdown (Informal)
[Integrating Graph based Algorithm and Transformer Models for Abstractive Summarization](https://aclanthology.org/2025.justnlp-main.21/) (Sha et al., JUSTNLP 2025)
ACL
Sayed Ayaan Ahmed Sha, Sangeetha Sivanesan, Anand Kumar Madasamy, and Navya Binu. 2025. Integrating Graph based Algorithm and Transformer Models for Abstractive Summarization. In Proceedings of the 1st Workshop on NLP for Empowering Justice (JUST-NLP 2025), pages 186–190, Mumbai, India. Association for Computational Linguistics.
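
The abstract describes a two-stage pipeline: TextRank first extracts the most salient sentences from a long judgment so the shortened text fits within BART's input window, and a CNN/DailyMail-finetuned BART model then produces the abstractive summary. The sketch below is an illustration of that idea only, not the authors' code: the checkpoint (facebook/bart-large-cnn stands in for the BART base model mentioned in the abstract), the sentence budget, and the generation settings are all assumptions.

```python
# Illustrative TextRank + BART sketch (not the authors' implementation).
# Assumptions: facebook/bart-large-cnn as a stand-in checkpoint; top_k and
# generation lengths are arbitrary choices, not values from the paper.
import networkx as nx
import nltk
import numpy as np
from nltk.tokenize import sent_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
from transformers import pipeline

for pkg in ("punkt", "punkt_tab"):      # sentence-tokenizer models for nltk
    nltk.download(pkg, quiet=True)

def textrank_extract(text: str, top_k: int = 25) -> str:
    """Select the top_k most salient sentences via PageRank over a sentence-similarity graph."""
    sentences = sent_tokenize(text)
    if len(sentences) <= top_k:
        return text
    tfidf = TfidfVectorizer().fit_transform(sentences)
    similarity = (tfidf @ tfidf.T).toarray()   # cosine similarity (TF-IDF rows are L2-normalized)
    np.fill_diagonal(similarity, 0.0)          # ignore self-similarity
    scores = nx.pagerank(nx.from_numpy_array(similarity))
    top = sorted(sorted(scores, key=scores.get, reverse=True)[:top_k])
    return " ".join(sentences[i] for i in top)  # keep original sentence order

summarizer = pipeline("summarization", model="facebook/bart-large-cnn")

def summarize_judgment(judgment_text: str) -> str:
    reduced = textrank_extract(judgment_text)   # extractive pre-selection to fit the input window
    out = summarizer(reduced, max_length=256, min_length=64, truncation=True)
    return out[0]["summary_text"]
```

The extractive pre-selection is what works around the input-token limitation the abstract alludes to; how the paper actually chunks, orders, or scores sentences may differ from this sketch.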