@inproceedings{kasner-dusek-2026-animatedllm-explaining,
title = "{A}nimated{LLM}: Explaining {LLM}s with Interactive Visualizations",
author = "Kasner, Zden{\v{e}}k and
Dusek, Ondrej",
editor = {A{\ss}enmacher, Matthias and
Biester, Laura and
Borg, Claudia and
Kov{\'a}cs, Gy{\"o}rgy and
Mieskes, Margot and
Serrano, Sofia},
booktitle = "Proceedings of the Seventh Workshop on Teaching Natural Language Processing ({T}each{NLP} 2026)",
month = mar,
year = "2026",
address = "Rabat, Morocco",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2026.teachingnlp-1.1/",
pages = "1--6",
ISBN = "979-8-89176-375-3",
abstract = "Large language models (LLMs) are becoming central to natural language processing education, yet materials showing their mechanics are sparse. We present AnimatedLLM, an interactive web application that provides step-by-step visualizations of a Transformer language model. AnimatedLLM runs entirely in the browser, using pre-computed traces of open LLMs applied on manually curated inputs. The application is available at https://animatedllm.github.io, both as a teaching aid and for self-educational purposes."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kasner-dusek-2026-animatedllm-explaining">
<titleInfo>
<title>AnimatedLLM: Explaining LLMs with Interactive Visualizations</title>
</titleInfo>
<name type="personal">
<namePart type="given">Zdeněk</namePart>
<namePart type="family">Kasner</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ondrej</namePart>
<namePart type="family">Dusek</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2026-03</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Seventh Workshop on Teaching Natural Language Processing (TeachNLP 2026)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Matthias</namePart>
<namePart type="family">Aßenmacher</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Laura</namePart>
<namePart type="family">Biester</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Claudia</namePart>
<namePart type="family">Borg</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">György</namePart>
<namePart type="family">Kovács</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Margot</namePart>
<namePart type="family">Mieskes</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sofia</namePart>
<namePart type="family">Serrano</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Rabat, Morocco</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-375-3</identifier>
</relatedItem>
<abstract>Large language models (LLMs) are becoming central to natural language processing education, yet materials showing their mechanics are sparse. We present AnimatedLLM, an interactive web application that provides step-by-step visualizations of a Transformer language model. AnimatedLLM runs entirely in the browser, using pre-computed traces of open LLMs applied on manually curated inputs. The application is available at https://animatedllm.github.io, both as a teaching aid and for self-educational purposes.</abstract>
<identifier type="citekey">kasner-dusek-2026-animatedllm-explaining</identifier>
<location>
<url>https://aclanthology.org/2026.teachingnlp-1.1/</url>
</location>
<part>
<date>2026-03</date>
<extent unit="page">
<start>1</start>
<end>6</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T AnimatedLLM: Explaining LLMs with Interactive Visualizations
%A Kasner, Zdeněk
%A Dusek, Ondrej
%Y Aßenmacher, Matthias
%Y Biester, Laura
%Y Borg, Claudia
%Y Kovács, György
%Y Mieskes, Margot
%Y Serrano, Sofia
%S Proceedings of the Seventh Workshop on Teaching Natural Language Processing (TeachNLP 2026)
%D 2026
%8 March
%I Association for Computational Linguistics
%C Rabat, Morocco
%@ 979-8-89176-375-3
%F kasner-dusek-2026-animatedllm-explaining
%X Large language models (LLMs) are becoming central to natural language processing education, yet materials showing their mechanics are sparse. We present AnimatedLLM, an interactive web application that provides step-by-step visualizations of a Transformer language model. AnimatedLLM runs entirely in the browser, using pre-computed traces of open LLMs applied on manually curated inputs. The application is available at https://animatedllm.github.io, both as a teaching aid and for self-educational purposes.
%U https://aclanthology.org/2026.teachingnlp-1.1/
%P 1-6
Markdown (Informal)
[AnimatedLLM: Explaining LLMs with Interactive Visualizations](https://aclanthology.org/2026.teachingnlp-1.1/) (Kasner & Dusek, TeachingNLP 2026)
ACL