BibTeX

@inproceedings{khandoga-etal-2025-framing,
title = "Framing the Language: Fine-Tuning Gemma 3 for Manipulation Detection",
author = "Khandoga, Mykola and
Kostiuk, Yevhen and
Polishko, Anton and
Kozlov, Kostiantyn and
Filipchuk, Yurii and
Kiulian, Artur",
editor = "Romanyshyn, Mariana",
booktitle = "Proceedings of the Fourth Ukrainian Natural Language Processing Workshop (UNLP 2025)",
month = jul,
year = "2025",
address = "Vienna, Austria (online)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.unlp-1.6/",
doi = "10.18653/v1/2025.unlp-1.6",
pages = "49--54",
ISBN = "979-8-89176-269-5",
abstract = "In this paper, we present our solutions for the two UNLP 2025 shared tasks: manipulation span detection and manipulation technique classification in Ukraine-related media content sourced from Telegram channels. We experimented with fine-tuning large language models (LLMs) with up to 12 billion parameters, including both encoder- and decoder-based architectures. Our experiments identified Gemma 3 12b with a custom classification head as the best-performing model for both tasks. To address the limited size of the original training dataset, we generated 50k synthetic samples and marked up an additional 400k media entries containing manipulative content."
}

MODS XML

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="khandoga-etal-2025-framing">
    <titleInfo>
      <title>Framing the Language: Fine-Tuning Gemma 3 for Manipulation Detection</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Mykola</namePart>
      <namePart type="family">Khandoga</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yevhen</namePart>
      <namePart type="family">Kostiuk</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Anton</namePart>
      <namePart type="family">Polishko</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Kostiantyn</namePart>
      <namePart type="family">Kozlov</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yurii</namePart>
      <namePart type="family">Filipchuk</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Artur</namePart>
      <namePart type="family">Kiulian</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2025-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Fourth Ukrainian Natural Language Processing Workshop (UNLP 2025)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Mariana</namePart>
        <namePart type="family">Romanyshyn</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Vienna, Austria (online)</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
      <identifier type="isbn">979-8-89176-269-5</identifier>
    </relatedItem>
    <abstract>In this paper, we present our solutions for the two UNLP 2025 shared tasks: manipulation span detection and manipulation technique classification in Ukraine-related media content sourced from Telegram channels. We experimented with fine-tuning large language models (LLMs) with up to 12 billion parameters, including both encoder- and decoder-based architectures. Our experiments identified Gemma 3 12b with a custom classification head as the best-performing model for both tasks. To address the limited size of the original training dataset, we generated 50k synthetic samples and marked up an additional 400k media entries containing manipulative content.</abstract>
    <identifier type="citekey">khandoga-etal-2025-framing</identifier>
    <identifier type="doi">10.18653/v1/2025.unlp-1.6</identifier>
    <location>
      <url>https://aclanthology.org/2025.unlp-1.6/</url>
    </location>
    <part>
      <date>2025-07</date>
      <extent unit="page">
        <start>49</start>
        <end>54</end>
      </extent>
    </part>
  </mods>
</modsCollection>

Endnote

%0 Conference Proceedings
%T Framing the Language: Fine-Tuning Gemma 3 for Manipulation Detection
%A Khandoga, Mykola
%A Kostiuk, Yevhen
%A Polishko, Anton
%A Kozlov, Kostiantyn
%A Filipchuk, Yurii
%A Kiulian, Artur
%Y Romanyshyn, Mariana
%S Proceedings of the Fourth Ukrainian Natural Language Processing Workshop (UNLP 2025)
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria (online)
%@ 979-8-89176-269-5
%F khandoga-etal-2025-framing
%X In this paper, we present our solutions for the two UNLP 2025 shared tasks: manipulation span detection and manipulation technique classification in Ukraine-related media content sourced from Telegram channels. We experimented with fine-tuning large language models (LLMs) with up to 12 billion parameters, including both encoder- and decoder-based architectures. Our experiments identified Gemma 3 12b with a custom classification head as the best-performing model for both tasks. To address the limited size of the original training dataset, we generated 50k synthetic samples and marked up an additional 400k media entries containing manipulative content.
%R 10.18653/v1/2025.unlp-1.6
%U https://aclanthology.org/2025.unlp-1.6/
%U https://doi.org/10.18653/v1/2025.unlp-1.6
%P 49-54

Markdown (Informal)

[Framing the Language: Fine-Tuning Gemma 3 for Manipulation Detection](https://aclanthology.org/2025.unlp-1.6/) (Khandoga et al., UNLP 2025)

ACL

Mykola Khandoga, Yevhen Kostiuk, Anton Polishko, Kostiantyn Kozlov, Yurii Filipchuk, and Artur Kiulian. 2025. Framing the Language: Fine-Tuning Gemma 3 for Manipulation Detection. In Proceedings of the Fourth Ukrainian Natural Language Processing Workshop (UNLP 2025), pages 49–54, Vienna, Austria (online). Association for Computational Linguistics.