@inproceedings{tanev-etal-2025-leveraging,
title = "Leveraging {LL}a{M}a for Abstractive Text Summarisation in {M}alayalam: An Experimental Study",
author = "Tanev, Hristo and
Pillai, Anitha S. and
R, Revathy V.",
editor = "Angelova, Galia and
Kunilovskaya, Maria and
Escribe, Marie and
Mitkov, Ruslan",
booktitle = "Proceedings of the 15th International Conference on Recent Advances in Natural Language Processing - Natural Language Processing in the Generative AI Era",
month = sep,
year = "2025",
address = "Varna, Bulgaria",
publisher = "INCOMA Ltd., Shoumen, Bulgaria",
url = "https://aclanthology.org/2025.ranlp-1.144/",
pages = "1248--1255",
abstract = "Recent years witnessed tremendous advancements in natural language processing (NLP) because of the development of complex language models that have automated several NLP applications, including text summarisation. Despite this progress, Malayalam text summarisation still faces challenges because of the peculiarities of the language. This research paper explores the potential of using a large language model, specifically the LLaMA (Large Language Model Meta AI) framework, for text summarisation of Malayalam language. In order to assess the performance of LLaMA for text summarization, for the low-resource language Malayalam, a dataset was curated with reference text and summaries. The evaluation showed that the LLaMA model could effectively summarize lengthy articles while maintaining important information and coherence. The generated summaries were compared with the reference summaries generated by human writers to observe how well aligned the model was with a human level of summarisation. The results proved that LLM can deal with the Malayalam text summarisation task, but more research is needed to understand the most relevant training strategy."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="tanev-etal-2025-leveraging">
<titleInfo>
<title>Leveraging LLaMa for Abstractive Text Summarisation in Malayalam: An Experimental Study</title>
</titleInfo>
<name type="personal">
<namePart type="given">Hristo</namePart>
<namePart type="family">Tanev</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anitha</namePart>
<namePart type="given">S</namePart>
<namePart type="family">Pillai</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Revathy</namePart>
<namePart type="given">V</namePart>
<namePart type="family">R</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 15th International Conference on Recent Advances in Natural Language Processing - Natural Language Processing in the Generative AI Era</title>
</titleInfo>
<name type="personal">
<namePart type="given">Galia</namePart>
<namePart type="family">Angelova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Maria</namePart>
<namePart type="family">Kunilovskaya</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marie</namePart>
<namePart type="family">Escribe</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ruslan</namePart>
<namePart type="family">Mitkov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>INCOMA Ltd., Shoumen, Bulgaria</publisher>
<place>
<placeTerm type="text">Varna, Bulgaria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Recent years witnessed tremendous advancements in natural language processing (NLP) because of the development of complex language models that have automated several NLP applications, including text summarisation. Despite this progress, Malayalam text summarisation still faces challenges because of the peculiarities of the language. This research paper explores the potential of using a large language model, specifically the LLaMA (Large Language Model Meta AI) framework, for text summarisation of Malayalam language. In order to assess the performance of LLaMA for text summarization, for the low-resource language Malayalam, a dataset was curated with reference text and summaries. The evaluation showed that the LLaMA model could effectively summarize lengthy articles while maintaining important information and coherence. The generated summaries were compared with the reference summaries generated by human writers to observe how well aligned the model was with a human level of summarisation. The results proved that LLM can deal with the Malayalam text summarisation task, but more research is needed to understand the most relevant training strategy.</abstract>
<identifier type="citekey">tanev-etal-2025-leveraging</identifier>
<location>
<url>https://aclanthology.org/2025.ranlp-1.144/</url>
</location>
<part>
<date>2025-09</date>
<extent unit="page">
<start>1248</start>
<end>1255</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Leveraging LLaMa for Abstractive Text Summarisation in Malayalam: An Experimental Study
%A Tanev, Hristo
%A Pillai, Anitha S.
%A R, Revathy V.
%Y Angelova, Galia
%Y Kunilovskaya, Maria
%Y Escribe, Marie
%Y Mitkov, Ruslan
%S Proceedings of the 15th International Conference on Recent Advances in Natural Language Processing - Natural Language Processing in the Generative AI Era
%D 2025
%8 September
%I INCOMA Ltd., Shoumen, Bulgaria
%C Varna, Bulgaria
%F tanev-etal-2025-leveraging
%X Recent years witnessed tremendous advancements in natural language processing (NLP) because of the development of complex language models that have automated several NLP applications, including text summarisation. Despite this progress, Malayalam text summarisation still faces challenges because of the peculiarities of the language. This research paper explores the potential of using a large language model, specifically the LLaMA (Large Language Model Meta AI) framework, for text summarisation of Malayalam language. In order to assess the performance of LLaMA for text summarization, for the low-resource language Malayalam, a dataset was curated with reference text and summaries. The evaluation showed that the LLaMA model could effectively summarize lengthy articles while maintaining important information and coherence. The generated summaries were compared with the reference summaries generated by human writers to observe how well aligned the model was with a human level of summarisation. The results proved that LLM can deal with the Malayalam text summarisation task, but more research is needed to understand the most relevant training strategy.
%U https://aclanthology.org/2025.ranlp-1.144/
%P 1248-1255
Markdown (Informal)
[Leveraging LLaMa for Abstractive Text Summarisation in Malayalam: An Experimental Study](https://aclanthology.org/2025.ranlp-1.144/) (Tanev et al., RANLP 2025)
ACL