@inproceedings{jose-greenstadt-2025-llms,
    title     = "{LLM}s for Detection and Classification of Persuasion Techniques in {S}lavic Parliamentary Debates and Social Media Texts",
    author    = "Jose, Julia and
      Greenstadt, Rachel",
    editor    = "Piskorski, Jakub and
      P{\v{r}}ib{\'a}{\v{n}}, Pavel and
      Nakov, Preslav and
      Yangarber, Roman and
      Marcinczuk, Michal",
    booktitle = "Proceedings of the 10th Workshop on Slavic Natural Language Processing (Slavic NLP 2025)",
    month     = jul,
    year      = "2025",
    address   = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url       = "https://aclanthology.org/2025.bsnlp-1.23/",
    doi       = "10.18653/v1/2025.bsnlp-1.23",
    pages     = "202--216",
    isbn      = "978-1-959429-57-9",
    abstract  = "We present an LLM-based method for the Slavic NLP 2025 shared task on detection and classification of persuasion techniques in parliamentary debates and social media. Our system uses OpenAI{'}s GPT models (gpt-4o-mini) and reasoning models (o4-mini) with chain-of-thought prompting, enforcing a {$\geq$} 0.99 confidence threshold for verbatim span extraction. For subtask 1, each paragraph in the text is labeled ``true'' if any of the 25 persuasion techniques is present. For subtask 2, the model returns the full set of techniques used per paragraph. Across Bulgarian, Croatian, Polish, Russian, and Slovenian, we achieve Subtask 1 micro-F1 of 81.7{\%}, 83.3{\%}, 81.6{\%}, 73.5{\%}, 62.0{\%}, respectively, and Subtask 2 F1 of 41.0{\%}, 44.4{\%}, 41.9{\%}, 29.3{\%}, 29.9{\%}, respectively. Our system ranked in the top 2 for Subtask 2 and top 7 for Subtask 1."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="jose-greenstadt-2025-llms">
<titleInfo>
<title>LLMs for Detection and Classification of Persuasion Techniques in Slavic Parliamentary Debates and Social Media Texts</title>
</titleInfo>
<name type="personal">
<namePart type="given">Julia</namePart>
<namePart type="family">Jose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rachel</namePart>
<namePart type="family">Greenstadt</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 10th Workshop on Slavic Natural Language Processing (Slavic NLP 2025)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jakub</namePart>
<namePart type="family">Piskorski</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pavel</namePart>
<namePart type="family">Přibáň</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Preslav</namePart>
<namePart type="family">Nakov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Roman</namePart>
<namePart type="family">Yangarber</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Michal</namePart>
<namePart type="family">Marcinczuk</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">978-1-959429-57-9</identifier>
</relatedItem>
<abstract>We present an LLM-based method for the Slavic NLP 2025 shared task on detection and classification of persuasion techniques in parliamentary debates and social media. Our system uses OpenAI’s GPT models (gpt-4o-mini) and reasoning models (o4-mini) with chain-of-thought prompting, enforcing a ≥ 0.99 confidence threshold for verbatim span extraction. For subtask 1, each paragraph in the text is labeled “true” if any of the 25 persuasion techniques is present. For subtask 2, the model returns the full set of techniques used per paragraph. Across Bulgarian, Croatian, Polish, Russian, and Slovenian, we achieve Subtask 1 micro-F1 of 81.7%, 83.3%, 81.6%, 73.5%, 62.0%, respectively, and Subtask 2 F1 of 41.0%, 44.4%, 41.9%, 29.3%, 29.9%, respectively. Our system ranked in the top 2 for Subtask 2 and top 7 for Subtask 1.</abstract>
<identifier type="citekey">jose-greenstadt-2025-llms</identifier>
<identifier type="doi">10.18653/v1/2025.bsnlp-1.23</identifier>
<location>
<url>https://aclanthology.org/2025.bsnlp-1.23/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>202</start>
<end>216</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T LLMs for Detection and Classification of Persuasion Techniques in Slavic Parliamentary Debates and Social Media Texts
%A Jose, Julia
%A Greenstadt, Rachel
%Y Piskorski, Jakub
%Y Přibáň, Pavel
%Y Nakov, Preslav
%Y Yangarber, Roman
%Y Marcinczuk, Michal
%S Proceedings of the 10th Workshop on Slavic Natural Language Processing (Slavic NLP 2025)
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 978-1-959429-57-9
%F jose-greenstadt-2025-llms
%X We present an LLM-based method for the Slavic NLP 2025 shared task on detection and classification of persuasion techniques in parliamentary debates and social media. Our system uses OpenAI’s GPT models (gpt-4o-mini) and reasoning models (o4-mini) with chain-of-thought prompting, enforcing a ≥ 0.99 confidence threshold for verbatim span extraction. For subtask 1, each paragraph in the text is labeled “true” if any of the 25 persuasion techniques is present. For subtask 2, the model returns the full set of techniques used per paragraph. Across Bulgarian, Croatian, Polish, Russian, and Slovenian, we achieve Subtask 1 micro-F1 of 81.7%, 83.3%, 81.6%, 73.5%, 62.0%, respectively, and Subtask 2 F1 of 41.0%, 44.4%, 41.9%, 29.3%, 29.9%, respectively. Our system ranked in the top 2 for Subtask 2 and top 7 for Subtask 1.
%R 10.18653/v1/2025.bsnlp-1.23
%U https://aclanthology.org/2025.bsnlp-1.23/
%U https://doi.org/10.18653/v1/2025.bsnlp-1.23
%P 202-216
Markdown (Informal)
[LLMs for Detection and Classification of Persuasion Techniques in Slavic Parliamentary Debates and Social Media Texts](https://aclanthology.org/2025.bsnlp-1.23/) (Jose & Greenstadt, BSNLP 2025)
ACL