@inproceedings{oneill-2025-f,
title = "{F}*ck Around and Find Out: Quasi-Malicious Interactions with {LLM}s as a Site of Situated Learning",
author = "ONeill, Sarah",
editor = "Przyby{\l}a, Piotr and
Shardlow, Matthew and
Colombatto, Clara and
Inie, Nanna",
booktitle = "Proceedings of Interdisciplinary Workshop on Observations of Misunderstood, Misguided and Malicious Use of Language Models",
month = sep,
year = "2025",
address = "Varna, Bulgaria",
publisher = "INCOMA Ltd., Shoumen, Bulgaria",
url = "https://aclanthology.org/2025.ommm-1.6/",
pages = "53--58",
abstract = "This work-in-progress paper proposes a cross-disciplinary perspective on ``malicious'' interactions with large language models (LLMs), reframing it from only a threat to be mitigated, we ask whether certain adversarial interactions can also serve as productive learning encounters that demystify the opaque workings of AI systems to novice users. We ground this inquiry in an anecdotal observation of a student who deliberately sabotaged a machine-learning robot{'}s training process in order to understand its underlying logic. We outline this observation with a conceptual framework for learning with, through, and from the material quirks of LLMs grounded in Papert{'}s constructionism and Hasse{'}s ultra-social learning theory. Finally, we present the preliminary design of a research-through-workshop where non-experts will jailbreak various LLM chatbots, investigating this encounter as a situated learning process. We share this early-stage research as an invitation for feedback on reimagining inappropriate and harmful interactions with LLMs not merely as problems, but as opportunities for engagement and education."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="oneill-2025-f">
<titleInfo>
<title>F*ck Around and Find Out: Quasi-Malicious Interactions with LLMs as a Site of Situated Learning</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sarah</namePart>
<namePart type="family">ONeill</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of Interdisciplinary Workshop on Observations of Misunderstood, Misguided and Malicious Use of Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Piotr</namePart>
<namePart type="family">Przybyła</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Matthew</namePart>
<namePart type="family">Shardlow</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Clara</namePart>
<namePart type="family">Colombatto</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nanna</namePart>
<namePart type="family">Inie</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>INCOMA Ltd., Shoumen, Bulgaria</publisher>
<place>
<placeTerm type="text">Varna, Bulgaria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This work-in-progress paper proposes a cross-disciplinary perspective on “malicious” interactions with large language models (LLMs): rather than treating them only as a threat to be mitigated, we ask whether certain adversarial interactions can also serve as productive learning encounters that demystify the opaque workings of AI systems for novice users. We ground this inquiry in an anecdotal observation of a student who deliberately sabotaged a machine-learning robot’s training process in order to understand its underlying logic. We pair this observation with a conceptual framework for learning with, through, and from the material quirks of LLMs, grounded in Papert’s constructionism and Hasse’s ultra-social learning theory. Finally, we present the preliminary design of a research-through-workshop where non-experts will jailbreak various LLM chatbots, investigating this encounter as a situated learning process. We share this early-stage research as an invitation for feedback on reimagining inappropriate and harmful interactions with LLMs not merely as problems, but as opportunities for engagement and education.</abstract>
<identifier type="citekey">oneill-2025-f</identifier>
<location>
<url>https://aclanthology.org/2025.ommm-1.6/</url>
</location>
<part>
<date>2025-09</date>
<extent unit="page">
<start>53</start>
<end>58</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T F*ck Around and Find Out: Quasi-Malicious Interactions with LLMs as a Site of Situated Learning
%A ONeill, Sarah
%Y Przybyła, Piotr
%Y Shardlow, Matthew
%Y Colombatto, Clara
%Y Inie, Nanna
%S Proceedings of Interdisciplinary Workshop on Observations of Misunderstood, Misguided and Malicious Use of Language Models
%D 2025
%8 September
%I INCOMA Ltd., Shoumen, Bulgaria
%C Varna, Bulgaria
%F oneill-2025-f
%X This work-in-progress paper proposes a cross-disciplinary perspective on “malicious” interactions with large language models (LLMs): rather than treating them only as a threat to be mitigated, we ask whether certain adversarial interactions can also serve as productive learning encounters that demystify the opaque workings of AI systems for novice users. We ground this inquiry in an anecdotal observation of a student who deliberately sabotaged a machine-learning robot’s training process in order to understand its underlying logic. We pair this observation with a conceptual framework for learning with, through, and from the material quirks of LLMs, grounded in Papert’s constructionism and Hasse’s ultra-social learning theory. Finally, we present the preliminary design of a research-through-workshop where non-experts will jailbreak various LLM chatbots, investigating this encounter as a situated learning process. We share this early-stage research as an invitation for feedback on reimagining inappropriate and harmful interactions with LLMs not merely as problems, but as opportunities for engagement and education.
%U https://aclanthology.org/2025.ommm-1.6/
%P 53-58
Markdown (Informal)
[F*ck Around and Find Out: Quasi-Malicious Interactions with LLMs as a Site of Situated Learning](https://aclanthology.org/2025.ommm-1.6/) (ONeill, OMMM 2025)
ACL
Sarah ONeill. 2025. [F*ck Around and Find Out: Quasi-Malicious Interactions with LLMs as a Site of Situated Learning](https://aclanthology.org/2025.ommm-1.6/). In *Proceedings of Interdisciplinary Workshop on Observations of Misunderstood, Misguided and Malicious Use of Language Models*, pages 53–58, Varna, Bulgaria. INCOMA Ltd., Shoumen, Bulgaria.