@inproceedings{mudryi-ignatenko-2025-precision,
title = "Precision vs. Perturbation: Robustness Analysis of Synonym Attacks in {U}krainian {NLP}",
author = "Mudryi, Volodymyr and
Ignatenko, Oleksii",
editor = "Romanyshyn, Mariana",
booktitle = "Proceedings of the Fourth Ukrainian Natural Language Processing Workshop (UNLP 2025)",
month = jul,
year = "2025",
address = "Vienna, Austria (online)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.unlp-1.15/",
doi = "10.18653/v1/2025.unlp-1.15",
pages = "131--146",
isbn = "979-8-89176-269-5",
abstract = "Synonym-based adversarial tests reveal fragile word patterns that accuracy metrics overlook, while virtually no such diagnostics exist for Ukrainian, a morphologically rich and low{-}resource language. We present the first systematic robustness evaluation under synonym substitution in Ukrainian. Adapting TextFooler and BERT{-}Attack to Ukrainian, we (i) adjust a 15000{-}entry synonym dictionary to match proper word forms; (ii) integrate similarity filters; (iii) adapt masked{-}LM search so it generates only valid inflected words. Across three text classification datasets (reviews, news headlines, social{-}media manipulation) and three transformer models (Ukr{-}RoBERTa, XLM{-}RoBERTa, SBERT), single{-}word swaps reduce accuracy by up to 12.6, while multi{-}step attacks degrade performance by as much as 40.27 with around 112 model queries. A few{-}shot transfer test shows GPT{-}4o, a state{-}of{-}the{-}art multilingual LLM, still suffers 6.9{--}15.0 drops on the same adversarial samples. Our results underscore the need for sense{-}aware, morphology{-}constrained synonym resources and provide a reproducible benchmark for future robustness research in Ukrainian NLP."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="mudryi-ignatenko-2025-precision">
<titleInfo>
<title>Precision vs. Perturbation: Robustness Analysis of Synonym Attacks in Ukrainian NLP</title>
</titleInfo>
<name type="personal">
<namePart type="given">Volodymyr</namePart>
<namePart type="family">Mudryi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Oleksii</namePart>
<namePart type="family">Ignatenko</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Fourth Ukrainian Natural Language Processing Workshop (UNLP 2025)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Mariana</namePart>
<namePart type="family">Romanyshyn</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria (online)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-269-5</identifier>
</relatedItem>
<abstract>Synonym-based adversarial tests reveal fragile word patterns that accuracy metrics overlook, while virtually no such diagnostics exist for Ukrainian, a morphologically rich and low-resource language. We present the first systematic robustness evaluation under synonym substitution in Ukrainian. Adapting TextFooler and BERT-Attack to Ukrainian, we (i) adjust a 15000-entry synonym dictionary to match proper word forms; (ii) integrate similarity filters; (iii) adapt masked-LM search so it generates only valid inflected words. Across three text classification datasets (reviews, news headlines, social-media manipulation) and three transformer models (Ukr-RoBERTa, XLM-RoBERTa, SBERT), single-word swaps reduce accuracy by up to 12.6, while multi-step attacks degrade performance by as much as 40.27 with around 112 model queries. A few-shot transfer test shows GPT-4o, a state-of-the-art multilingual LLM, still suffers 6.9–15.0 drops on the same adversarial samples. Our results underscore the need for sense-aware, morphology-constrained synonym resources and provide a reproducible benchmark for future robustness research in Ukrainian NLP.</abstract>
<identifier type="citekey">mudryi-ignatenko-2025-precision</identifier>
<identifier type="doi">10.18653/v1/2025.unlp-1.15</identifier>
<location>
<url>https://aclanthology.org/2025.unlp-1.15/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>131</start>
<end>146</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Precision vs. Perturbation: Robustness Analysis of Synonym Attacks in Ukrainian NLP
%A Mudryi, Volodymyr
%A Ignatenko, Oleksii
%Y Romanyshyn, Mariana
%S Proceedings of the Fourth Ukrainian Natural Language Processing Workshop (UNLP 2025)
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria (online)
%@ 979-8-89176-269-5
%F mudryi-ignatenko-2025-precision
%X Synonym-based adversarial tests reveal fragile word patterns that accuracy metrics overlook, while virtually no such diagnostics exist for Ukrainian, a morphologically rich and low-resource language. We present the first systematic robustness evaluation under synonym substitution in Ukrainian. Adapting TextFooler and BERT-Attack to Ukrainian, we (i) adjust a 15000-entry synonym dictionary to match proper word forms; (ii) integrate similarity filters; (iii) adapt masked-LM search so it generates only valid inflected words. Across three text classification datasets (reviews, news headlines, social-media manipulation) and three transformer models (Ukr-RoBERTa, XLM-RoBERTa, SBERT), single-word swaps reduce accuracy by up to 12.6, while multi-step attacks degrade performance by as much as 40.27 with around 112 model queries. A few-shot transfer test shows GPT-4o, a state-of-the-art multilingual LLM, still suffers 6.9–15.0 drops on the same adversarial samples. Our results underscore the need for sense-aware, morphology-constrained synonym resources and provide a reproducible benchmark for future robustness research in Ukrainian NLP.
%R 10.18653/v1/2025.unlp-1.15
%U https://aclanthology.org/2025.unlp-1.15/
%U https://doi.org/10.18653/v1/2025.unlp-1.15
%P 131-146
Markdown (Informal)
[Precision vs. Perturbation: Robustness Analysis of Synonym Attacks in Ukrainian NLP](https://aclanthology.org/2025.unlp-1.15/) (Mudryi & Ignatenko, UNLP 2025)
ACL