@inproceedings{kuiper-etal-2025-lad,
title = "{LAD}: {L}o{RA}-Adapted Diffusion",
author = "Kuiper, Ruurd Jan Anthonius and
de Groot, Lars and
van Es, Bram and
van Smeden, Maarten and
Bagheri, Ayoub",
editor = {Habernal, Ivan and
Schulam, Peter and
Tiedemann, J{\"o}rg},
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing: System Demonstrations",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.emnlp-demos.8/",
pages = "97--110",
ISBN = "979-8-89176-334-0",
abstract = "Autoregressive models dominate text generation but suffer from left-to-right decoding constraints that limit efficiency and bidirectional reasoning. Diffusion-based models offer a flexible alternative but face challenges in adapting to discrete text efficiently. We propose LAD (LoRA-Adapted Diffusion), a framework for non-autoregressive generation that adapts LLaMA models for iterative, bidirectional sequence refinement using LoRA adapters. LAD employs a structural denoising objective combining masking with text perturbations (swaps, duplications and span shifts), enabling full sequence editing during generation. We aim to demonstrate that LAD could be a viable and efficient alternative to training diffusion models from scratch, by providing both validation results as well as two interactive demos directly available online:https://ruurdkuiper.github.io/tini-lad/https://huggingface.co/spaces/Ruurd/tini-ladInference and training code:https://github.com/RuurdKuiper/lad-code"
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="kuiper-etal-2025-lad">
    <titleInfo>
      <title>LAD: LoRA-Adapted Diffusion</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Ruurd</namePart>
      <namePart type="given">Jan</namePart>
      <namePart type="given">Anthonius</namePart>
      <namePart type="family">Kuiper</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Lars</namePart>
      <namePart type="family">de Groot</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Bram</namePart>
      <namePart type="family">van Es</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Maarten</namePart>
      <namePart type="family">van Smeden</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ayoub</namePart>
      <namePart type="family">Bagheri</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2025-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing: System Demonstrations</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Ivan</namePart>
        <namePart type="family">Habernal</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Peter</namePart>
        <namePart type="family">Schulam</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Jörg</namePart>
        <namePart type="family">Tiedemann</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Suzhou, China</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
      <identifier type="isbn">979-8-89176-334-0</identifier>
    </relatedItem>
    <abstract>Autoregressive models dominate text generation but suffer from left-to-right decoding constraints that limit efficiency and bidirectional reasoning. Diffusion-based models offer a flexible alternative but face challenges in adapting to discrete text efficiently. We propose LAD (LoRA-Adapted Diffusion), a framework for non-autoregressive generation that adapts LLaMA models for iterative, bidirectional sequence refinement using LoRA adapters. LAD employs a structural denoising objective combining masking with text perturbations (swaps, duplications and span shifts), enabling full sequence editing during generation. We aim to demonstrate that LAD could be a viable and efficient alternative to training diffusion models from scratch, by providing both validation results and two interactive demos, available online at https://ruurdkuiper.github.io/tini-lad/ and https://huggingface.co/spaces/Ruurd/tini-lad. Inference and training code: https://github.com/RuurdKuiper/lad-code</abstract>
<identifier type="citekey">kuiper-etal-2025-lad</identifier>
<location>
<url>https://aclanthology.org/2025.emnlp-demos.8/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>97</start>
<end>110</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T LAD: LoRA-Adapted Diffusion
%A Kuiper, Ruurd Jan Anthonius
%A de Groot, Lars
%A van Es, Bram
%A van Smeden, Maarten
%A Bagheri, Ayoub
%Y Habernal, Ivan
%Y Schulam, Peter
%Y Tiedemann, Jörg
%S Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing: System Demonstrations
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-334-0
%F kuiper-etal-2025-lad
%X Autoregressive models dominate text generation but suffer from left-to-right decoding constraints that limit efficiency and bidirectional reasoning. Diffusion-based models offer a flexible alternative but face challenges in adapting to discrete text efficiently. We propose LAD (LoRA-Adapted Diffusion), a framework for non-autoregressive generation that adapts LLaMA models for iterative, bidirectional sequence refinement using LoRA adapters. LAD employs a structural denoising objective combining masking with text perturbations (swaps, duplications and span shifts), enabling full sequence editing during generation. We aim to demonstrate that LAD could be a viable and efficient alternative to training diffusion models from scratch, by providing both validation results and two interactive demos, available online at https://ruurdkuiper.github.io/tini-lad/ and https://huggingface.co/spaces/Ruurd/tini-lad. Inference and training code: https://github.com/RuurdKuiper/lad-code
%U https://aclanthology.org/2025.emnlp-demos.8/
%P 97-110
Markdown (Informal)
[LAD: LoRA-Adapted Diffusion](https://aclanthology.org/2025.emnlp-demos.8/) (Kuiper et al., EMNLP 2025)

ACL
Ruurd Jan Anthonius Kuiper, Lars de Groot, Bram van Es, Maarten van Smeden, and Ayoub Bagheri. 2025. LAD: LoRA-Adapted Diffusion. In Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 97–110, Suzhou, China. Association for Computational Linguistics.
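
For readers who want a concrete picture of the structural denoising objective described in the abstract (masking combined with swaps, duplications and span shifts), here is a minimal sketch of such a corruption step. This is an illustration only: the corruption rates, mask token string, and span length are assumptions, not values taken from the paper.

```python
import random

MASK = "<mask>"  # hypothetical mask token; the paper's tokenizer may differ


def corrupt(tokens, p_mask=0.3, p_swap=0.1, p_dup=0.05, p_shift=0.05,
            max_span=3, rng=None):
    """Apply structural noise in the style LAD's abstract describes:
    swaps, duplications, span shifts, then masking. Rates are guesses."""
    rng = rng or random.Random()
    toks = list(tokens)

    # Swap adjacent token pairs with probability p_swap.
    for i in range(len(toks) - 1):
        if rng.random() < p_swap:
            toks[i], toks[i + 1] = toks[i + 1], toks[i]

    # Duplicate individual tokens in place with probability p_dup.
    out = []
    for t in toks:
        out.append(t)
        if rng.random() < p_dup:
            out.append(t)
    toks = out

    # Occasionally move a short span to a random new position.
    if len(toks) > max_span and rng.random() < p_shift:
        start = rng.randrange(len(toks) - max_span)
        span = toks[start:start + max_span]
        del toks[start:start + max_span]
        pos = rng.randrange(len(toks) + 1)
        toks[pos:pos] = span

    # Finally, mask a fraction of the perturbed sequence.
    return [MASK if rng.random() < p_mask else t for t in toks]


if __name__ == "__main__":
    words = "the quick brown fox jumps over the lazy dog".split()
    print(corrupt(words, rng=random.Random(0)))
```

Likewise, attaching LoRA adapters to a LLaMA checkpoint can be sketched with the Hugging Face peft library. The checkpoint name, rank, and target modules below are illustrative assumptions, and the sketch omits the change from causal to bidirectional attention that the paper's iterative refinement implies.

```python
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

# Hypothetical base model and LoRA hyperparameters, not the paper's setup.
base = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B")
config = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05,
                    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"])
model = get_peft_model(base, config)
model.print_trainable_parameters()  # only the adapter weights train
```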