@inproceedings{n-vora-etal-2024-extractive,
title = "Extractive Summarization using Extended {T}ext{R}ank Algorithm",
author = "N. Vora, Ansh and
Jain, Rinit Mayur and
Shah, Aastha Sanjeev and
Sonawane, Sheetal",
editor = "Lalitha Devi, Sobha and
Arora, Karunesh",
booktitle = "Proceedings of the 21st International Conference on Natural Language Processing (ICON)",
month = dec,
year = "2024",
address = "AU-KBC Research Centre, Chennai, India",
publisher = "NLP Association of India (NLPAI)",
url = "https://aclanthology.org/2024.icon-1.54/",
pages = "462--471",
abstract = "With so much information available online, it's more important than ever to have reliable tools for summarizing text quickly and accurately. In this paper, we introduce a new way to improve the popular TextRank algorithm for extractive summarization. By adding a dynamic damping factor and using Latent Dirichlet Allocation (LDA) to enhance how text is represented, our method creates more meaningful summaries. We tested it with metrics like Pyramid, METEOR, and ROUGE, and compared it to the original TextRank. The results were promising, showing that our approach produces better summaries and could be useful for real-world applications like text mining and information retrieval."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="n-vora-etal-2024-extractive">
<titleInfo>
<title>Extractive Summarization using Extended TextRank Algorithm</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ansh</namePart>
<namePart type="family">N. Vora</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rinit</namePart>
<namePart type="given">Mayur</namePart>
<namePart type="family">Jain</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aastha</namePart>
<namePart type="given">Sanjeev</namePart>
<namePart type="family">Shah</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sheetal</namePart>
<namePart type="family">Sonawane</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 21st International Conference on Natural Language Processing (ICON)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sobha</namePart>
<namePart type="family">Lalitha Devi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Karunesh</namePart>
<namePart type="family">Arora</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>NLP Association of India (NLPAI)</publisher>
<place>
<placeTerm type="text">AU-KBC Research Centre, Chennai, India</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>With so much information available online, it's more important than ever to have reliable tools for summarizing text quickly and accurately. In this paper, we introduce a new way to improve the popular TextRank algorithm for extractive summarization. By adding a dynamic damping factor and using Latent Dirichlet Allocation (LDA) to enhance how text is represented, our method creates more meaningful summaries. We tested it with metrics like Pyramid, METEOR, and ROUGE, and compared it to the original TextRank. The results were promising, showing that our approach produces better summaries and could be useful for real-world applications like text mining and information retrieval.</abstract>
<identifier type="citekey">n-vora-etal-2024-extractive</identifier>
<location>
<url>https://aclanthology.org/2024.icon-1.54/</url>
</location>
<part>
<date>2024-12</date>
<extent unit="page">
<start>462</start>
<end>471</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Extractive Summarization using Extended TextRank Algorithm
%A N. Vora, Ansh
%A Jain, Rinit Mayur
%A Shah, Aastha Sanjeev
%A Sonawane, Sheetal
%Y Lalitha Devi, Sobha
%Y Arora, Karunesh
%S Proceedings of the 21st International Conference on Natural Language Processing (ICON)
%D 2024
%8 December
%I NLP Association of India (NLPAI)
%C AU-KBC Research Centre, Chennai, India
%F n-vora-etal-2024-extractive
%X With so much information available online, it's more important than ever to have reliable tools for summarizing text quickly and accurately. In this paper, we introduce a new way to improve the popular TextRank algorithm for extractive summarization. By adding a dynamic damping factor and using Latent Dirichlet Allocation (LDA) to enhance how text is represented, our method creates more meaningful summaries. We tested it with metrics like Pyramid, METEOR, and ROUGE, and compared it to the original TextRank. The results were promising, showing that our approach produces better summaries and could be useful for real-world applications like text mining and information retrieval.
%U https://aclanthology.org/2024.icon-1.54/
%P 462-471
Markdown (Informal)
[Extractive Summarization using Extended TextRank Algorithm](https://aclanthology.org/2024.icon-1.54/) (N. Vora et al., ICON 2024)
ACL
- Ansh N. Vora, Rinit Mayur Jain, Aastha Sanjeev Shah, and Sheetal Sonawane. 2024. Extractive Summarization using Extended TextRank Algorithm. In Proceedings of the 21st International Conference on Natural Language Processing (ICON), pages 462–471, AU-KBC Research Centre, Chennai, India. NLP Association of India (NLPAI).