@inproceedings{anschutz-etal-2024-simpler,
title = "Simpler Becomes Harder: Do {LLM}s Exhibit a Coherent Behavior on Simplified Corpora?",
author = {Ansch{\"u}tz, Miriam and
Mosca, Edoardo and
Groh, Georg},
    editor = "Di Nunzio, Giorgio Maria and
Vezzani, Federica and
Ermakova, Liana and
Azarbonyad, Hosein and
Kamps, Jaap",
booktitle = "Proceedings of the Workshop on DeTermIt! Evaluating Text Difficulty in a Multilingual Context @ LREC-COLING 2024",
month = may,
year = "2024",
address = "Torino, Italia",
publisher = "ELRA and ICCL",
url = "https://aclanthology.org/2024.determit-1.17",
pages = "185--195",
abstract = "Text simplification seeks to improve readability while retaining the original content and meaning. Our study investigates whether pre-trained classifiers also maintain such coherence by comparing their predictions on both original and simplified inputs. We conduct experiments using 11 pre-trained models, including BERT and OpenAI{'}s GPT 3.5, across six datasets spanning three languages. Additionally, we conduct a detailed analysis of the correlation between prediction change rates and simplification types/strengths. Our findings reveal alarming inconsistencies across all languages and models. If not promptly addressed, simplified inputs can be easily exploited to craft zero-iteration model-agnostic adversarial attacks with success rates of up to 50{\%}.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="anschutz-etal-2024-simpler">
<titleInfo>
<title>Simpler Becomes Harder: Do LLMs Exhibit a Coherent Behavior on Simplified Corpora?</title>
</titleInfo>
<name type="personal">
<namePart type="given">Miriam</namePart>
<namePart type="family">Anschütz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Edoardo</namePart>
<namePart type="family">Mosca</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Georg</namePart>
<namePart type="family">Groh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Workshop on DeTermIt! Evaluating Text Difficulty in a Multilingual Context @ LREC-COLING 2024</title>
</titleInfo>
<name type="personal">
<namePart type="given">Giorgio</namePart>
<namePart type="given">Maria</namePart>
<namePart type="family">Di Nunzio</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Federica</namePart>
<namePart type="family">Vezzani</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Liana</namePart>
<namePart type="family">Ermakova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hosein</namePart>
<namePart type="family">Azarbonyad</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jaap</namePart>
<namePart type="family">Kamps</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>ELRA and ICCL</publisher>
<place>
<placeTerm type="text">Torino, Italia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Text simplification seeks to improve readability while retaining the original content and meaning. Our study investigates whether pre-trained classifiers also maintain such coherence by comparing their predictions on both original and simplified inputs. We conduct experiments using 11 pre-trained models, including BERT and OpenAI’s GPT 3.5, across six datasets spanning three languages. Additionally, we conduct a detailed analysis of the correlation between prediction change rates and simplification types/strengths. Our findings reveal alarming inconsistencies across all languages and models. If not promptly addressed, simplified inputs can be easily exploited to craft zero-iteration model-agnostic adversarial attacks with success rates of up to 50%.</abstract>
<identifier type="citekey">anschutz-etal-2024-simpler</identifier>
<location>
<url>https://aclanthology.org/2024.determit-1.17</url>
</location>
<part>
<date>2024-05</date>
<extent unit="page">
<start>185</start>
<end>195</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Simpler Becomes Harder: Do LLMs Exhibit a Coherent Behavior on Simplified Corpora?
%A Anschütz, Miriam
%A Mosca, Edoardo
%A Groh, Georg
%Y Di Nunzio, Giorgio Maria
%Y Vezzani, Federica
%Y Ermakova, Liana
%Y Azarbonyad, Hosein
%Y Kamps, Jaap
%S Proceedings of the Workshop on DeTermIt! Evaluating Text Difficulty in a Multilingual Context @ LREC-COLING 2024
%D 2024
%8 May
%I ELRA and ICCL
%C Torino, Italia
%F anschutz-etal-2024-simpler
%X Text simplification seeks to improve readability while retaining the original content and meaning. Our study investigates whether pre-trained classifiers also maintain such coherence by comparing their predictions on both original and simplified inputs. We conduct experiments using 11 pre-trained models, including BERT and OpenAI’s GPT 3.5, across six datasets spanning three languages. Additionally, we conduct a detailed analysis of the correlation between prediction change rates and simplification types/strengths. Our findings reveal alarming inconsistencies across all languages and models. If not promptly addressed, simplified inputs can be easily exploited to craft zero-iteration model-agnostic adversarial attacks with success rates of up to 50%.
%U https://aclanthology.org/2024.determit-1.17
%P 185-195
Markdown (Informal)
[Simpler Becomes Harder: Do LLMs Exhibit a Coherent Behavior on Simplified Corpora?](https://aclanthology.org/2024.determit-1.17) (Anschütz et al., DeTermIt-WS 2024)
ACL