@inproceedings{du-etal-2025-decoding,
title = "Decoding Emotion in Ancient Poetry: Leveraging Generative Models for Classical {C}hinese Sentiment Analysis",
author = "Du, Quanqi and
De Langhe, Loic and
Lefever, Els and
Hoste, Veronique",
editor = "Angelova, Galia and
Kunilovskaya, Maria and
Escribe, Marie and
Mitkov, Ruslan",
booktitle = "Proceedings of the 15th International Conference on Recent Advances in Natural Language Processing - Natural Language Processing in the Generative AI Era",
month = sep,
year = "2025",
address = "Varna, Bulgaria",
publisher = "INCOMA Ltd., Shoumen, Bulgaria",
url = "https://aclanthology.org/2025.ranlp-1.38/",
pages = "306--315",
abstract = "This study explores the use of generative language models for sentiment analysis of classical Chinese poetry, aiming to better understand emotional expression in literary texts. Using the FSPC dataset, we evaluate two models, Qwen-2.5 and LLaMA-3.1, under various prompting strategies. Initial experiments show that base models struggle with task-specific instructions. By applying different instruction tuning strategies with Low-Rank Adaptation (LoRA), we significantly enhance the models' ability to follow task instructions and capture poetic sentiment, with LLaMA-3.1 achieving the best results (67.10{\%} accuracy, 65.42{\%} macro F1), demonstrating competitive performance against data-intensive, domain-adapted baselines. We further examine the effects of prompt language and multi-task learning, finding that English prompts outperform Chinese ones. These results highlight the promise of instruction-tuned generative models in sentiment analysis of classical Chinese poetry, and underscore the importance of prompt formulation in literary understanding tasks."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="du-etal-2025-decoding">
<titleInfo>
<title>Decoding Emotion in Ancient Poetry: Leveraging Generative Models for Classical Chinese Sentiment Analysis</title>
</titleInfo>
<name type="personal">
<namePart type="given">Quanqi</namePart>
<namePart type="family">Du</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Loic</namePart>
<namePart type="family">De Langhe</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Els</namePart>
<namePart type="family">Lefever</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Veronique</namePart>
<namePart type="family">Hoste</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 15th International Conference on Recent Advances in Natural Language Processing - Natural Language Processing in the Generative AI Era</title>
</titleInfo>
<name type="personal">
<namePart type="given">Galia</namePart>
<namePart type="family">Angelova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Maria</namePart>
<namePart type="family">Kunilovskaya</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marie</namePart>
<namePart type="family">Escribe</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ruslan</namePart>
<namePart type="family">Mitkov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>INCOMA Ltd., Shoumen, Bulgaria</publisher>
<place>
<placeTerm type="text">Varna, Bulgaria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This study explores the use of generative language models for sentiment analysis of classical Chinese poetry, aiming to better understand emotional expression in literary texts. Using the FSPC dataset, we evaluate two models, Qwen-2.5 and LLaMA-3.1, under various prompting strategies. Initial experiments show that base models struggle with task-specific instructions. By applying different instruction tuning strategies with Low-Rank Adaptation (LoRA), we significantly enhance the models’ ability to follow task instructions and capture poetic sentiment, with LLaMA-3.1 achieving the best results (67.10% accuracy, 65.42% macro F1), demonstrating competitive performance against data-intensive, domain-adapted baselines. We further examine the effects of prompt language and multi-task learning, finding that English prompts outperform Chinese ones. These results highlight the promise of instruction-tuned generative models in sentiment analysis of classical Chinese poetry, and underscore the importance of prompt formulation in literary understanding tasks.</abstract>
<identifier type="citekey">du-etal-2025-decoding</identifier>
<location>
<url>https://aclanthology.org/2025.ranlp-1.38/</url>
</location>
<part>
<date>2025-09</date>
<extent unit="page">
<start>306</start>
<end>315</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Decoding Emotion in Ancient Poetry: Leveraging Generative Models for Classical Chinese Sentiment Analysis
%A Du, Quanqi
%A De Langhe, Loic
%A Lefever, Els
%A Hoste, Veronique
%Y Angelova, Galia
%Y Kunilovskaya, Maria
%Y Escribe, Marie
%Y Mitkov, Ruslan
%S Proceedings of the 15th International Conference on Recent Advances in Natural Language Processing - Natural Language Processing in the Generative AI Era
%D 2025
%8 September
%I INCOMA Ltd., Shoumen, Bulgaria
%C Varna, Bulgaria
%F du-etal-2025-decoding
%X This study explores the use of generative language models for sentiment analysis of classical Chinese poetry, aiming to better understand emotional expression in literary texts. Using the FSPC dataset, we evaluate two models, Qwen-2.5 and LLaMA-3.1, under various prompting strategies. Initial experiments show that base models struggle with task-specific instructions. By applying different instruction tuning strategies with Low-Rank Adaptation (LoRA), we significantly enhance the models’ ability to follow task instructions and capture poetic sentiment, with LLaMA-3.1 achieving the best results (67.10% accuracy, 65.42% macro F1), demonstrating competitive performance against data-intensive, domain-adapted baselines. We further examine the effects of prompt language and multi-task learning, finding that English prompts outperform Chinese ones. These results highlight the promise of instruction-tuned generative models in sentiment analysis of classical Chinese poetry, and underscore the importance of prompt formulation in literary understanding tasks.
%U https://aclanthology.org/2025.ranlp-1.38/
%P 306-315
Markdown (Informal)
[Decoding Emotion in Ancient Poetry: Leveraging Generative Models for Classical Chinese Sentiment Analysis](https://aclanthology.org/2025.ranlp-1.38/) (Du et al., RANLP 2025)
ACL