@inproceedings{loginova-2025-fine,
title = "{Fine-Tuned} Transformers for Detection and Classification of Persuasion Techniques in {Slavic} Languages",
author = "Loginova, Ekaterina",
editor = "Piskorski, Jakub and
P{\v{r}}ib{\'a}{\v{n}}, Pavel and
Nakov, Preslav and
Yangarber, Roman and
Marcinczuk, Michal",
booktitle = "Proceedings of the 10th Workshop on Slavic Natural Language Processing (Slavic NLP 2025)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.bsnlp-1.17/",
doi = "10.18653/v1/2025.bsnlp-1.17",
pages = "151--156",
ISBN = "978-1-959429-57-9",
abstract = "This paper details a system developed for the SlavicNLP 2025 Shared Task on the Detection and Classification of Persuasion Techniques in Texts for Slavic Languages (Bulgarian, Croatian, Polish, Russian and Slovene). The shared task comprises two subtasks: binary detection of persuasive content within text fragments and multi-class, multi-label identification of specific persuasion techniques at the token level. Our primary approach for both subtasks involved fine-tuning pre-trained multilingual Transformer models. For Subtask 1 (paragraph-level binary detection) we fine-tuned a multilingual Transformer sequence classifier, its training augmented by a set of additional labelled data. For Subtask 2 (token-level multi-label classification) we re-cast the problem as named-entity recognition. The resulting systems reached F1 score of 0.92 in paragraph-level detection (ranked third on average). We present our system architecture, data handling, training procedures, and official results, alongside areas for future improvement."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="loginova-2025-fine">
<titleInfo>
<title>Fine-Tuned Transformers for Detection and Classification of Persuasion Techniques in Slavic Languages</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ekaterina</namePart>
<namePart type="family">Loginova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 10th Workshop on Slavic Natural Language Processing (Slavic NLP 2025)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jakub</namePart>
<namePart type="family">Piskorski</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pavel</namePart>
<namePart type="family">Přibáň</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Preslav</namePart>
<namePart type="family">Nakov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Roman</namePart>
<namePart type="family">Yangarber</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Michal</namePart>
<namePart type="family">Marcinczuk</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">978-1-959429-57-9</identifier>
</relatedItem>
<abstract>This paper details a system developed for the SlavicNLP 2025 Shared Task on the Detection and Classification of Persuasion Techniques in Texts for Slavic Languages (Bulgarian, Croatian, Polish, Russian and Slovene). The shared task comprises two subtasks: binary detection of persuasive content within text fragments and multi-class, multi-label identification of specific persuasion techniques at the token level. Our primary approach for both subtasks involved fine-tuning pre-trained multilingual Transformer models. For Subtask 1 (paragraph-level binary detection) we fine-tuned a multilingual Transformer sequence classifier, its training augmented by a set of additional labelled data. For Subtask 2 (token-level multi-label classification) we re-cast the problem as named-entity recognition. The resulting systems reached F1 score of 0.92 in paragraph-level detection (ranked third on average). We present our system architecture, data handling, training procedures, and official results, alongside areas for future improvement.</abstract>
<identifier type="citekey">loginova-2025-fine</identifier>
<identifier type="doi">10.18653/v1/2025.bsnlp-1.17</identifier>
<location>
<url>https://aclanthology.org/2025.bsnlp-1.17/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>151</start>
<end>156</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Fine-Tuned Transformers for Detection and Classification of Persuasion Techniques in Slavic Languages
%A Loginova, Ekaterina
%Y Piskorski, Jakub
%Y Přibáň, Pavel
%Y Nakov, Preslav
%Y Yangarber, Roman
%Y Marcinczuk, Michal
%S Proceedings of the 10th Workshop on Slavic Natural Language Processing (Slavic NLP 2025)
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 978-1-959429-57-9
%F loginova-2025-fine
%X This paper details a system developed for the SlavicNLP 2025 Shared Task on the Detection and Classification of Persuasion Techniques in Texts for Slavic Languages (Bulgarian, Croatian, Polish, Russian and Slovene). The shared task comprises two subtasks: binary detection of persuasive content within text fragments and multi-class, multi-label identification of specific persuasion techniques at the token level. Our primary approach for both subtasks involved fine-tuning pre-trained multilingual Transformer models. For Subtask 1 (paragraph-level binary detection) we fine-tuned a multilingual Transformer sequence classifier, its training augmented by a set of additional labelled data. For Subtask 2 (token-level multi-label classification) we re-cast the problem as named-entity recognition. The resulting systems reached F1 score of 0.92 in paragraph-level detection (ranked third on average). We present our system architecture, data handling, training procedures, and official results, alongside areas for future improvement.
%R 10.18653/v1/2025.bsnlp-1.17
%U https://aclanthology.org/2025.bsnlp-1.17/
%U https://doi.org/10.18653/v1/2025.bsnlp-1.17
%P 151-156
Markdown (Informal)
[Fine-Tuned Transformers for Detection and Classification of Persuasion Techniques in Slavic Languages](https://aclanthology.org/2025.bsnlp-1.17/) (Loginova, BSNLP 2025)
ACL