@inproceedings{wang-etal-2025-sparsifying,
    title = "Sparsifying Mamba",
    author = "Wang, An and
      Xie, Ruobing and
      Li, Shuaipeng and
      Sun, Xingwu and
      Kang, Zhanhui",
    editor = "Christodoulopoulos, Christos and
      Chakraborty, Tanmoy and
      Rose, Carolyn and
      Peng, Violet",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
    month = nov,
    year = "2025",
    address = "Suzhou, China",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.findings-emnlp.959/",
    doi = "10.18653/v1/2025.findings-emnlp.959",
    pages = "17661--17667",
    ISBN = "979-8-89176-335-7",
abstract = "The Transformer architecture has long dominated the development of large language models, but its quadratic complexity in sequence length presents scalability challenges. Recent advances in State Space Models, particularly Mamba series, offer a promising alternative with linear-time inference and competitive performance. While scaling model capacity via sparsification, exemplified by Mixture-of-Experts, has proven effective in reducing computation while expanding knowledge capacity, the integration of sparsification with Mamba remains largely unexplored. Existing attempts typically apply naive block-level stacking, failing to leverage Mamba{'}s internal structure for fine-grained sparsification. In this work, we mainly explore how to sparsify the parameters inside Mamba. We found that the effects of using sparsification strategies on parameters related to various mechanisms inside mamba are significantly different. Our proposed Mamba-MoZ framework introduces a flexible and effective sparsification mechanism inside Mamba, which can independently achieve parameter scalability and has stronger performance."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="wang-etal-2025-sparsifying">
<titleInfo>
<title>Sparsifying Mamba</title>
</titleInfo>
<name type="personal">
<namePart type="given">An</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ruobing</namePart>
<namePart type="family">Xie</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shuaipeng</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xingwu</namePart>
<namePart type="family">Sun</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhanhui</namePart>
<namePart type="family">Kang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Christos</namePart>
<namePart type="family">Christodoulopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tanmoy</namePart>
<namePart type="family">Chakraborty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolyn</namePart>
<namePart type="family">Rose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Violet</namePart>
<namePart type="family">Peng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-335-7</identifier>
</relatedItem>
<abstract>The Transformer architecture has long dominated the development of large language models, but its quadratic complexity in sequence length presents scalability challenges. Recent advances in State Space Models, particularly Mamba series, offer a promising alternative with linear-time inference and competitive performance. While scaling model capacity via sparsification, exemplified by Mixture-of-Experts, has proven effective in reducing computation while expanding knowledge capacity, the integration of sparsification with Mamba remains largely unexplored. Existing attempts typically apply naive block-level stacking, failing to leverage Mamba’s internal structure for fine-grained sparsification. In this work, we mainly explore how to sparsify the parameters inside Mamba. We found that the effects of using sparsification strategies on parameters related to various mechanisms inside mamba are significantly different. Our proposed Mamba-MoZ framework introduces a flexible and effective sparsification mechanism inside Mamba, which can independently achieve parameter scalability and has stronger performance.</abstract>
<identifier type="citekey">wang-etal-2025-sparsifying</identifier>
<identifier type="doi">10.18653/v1/2025.findings-emnlp.959</identifier>
<location>
<url>https://aclanthology.org/2025.findings-emnlp.959/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>17661</start>
<end>17667</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Sparsifying Mamba
%A Wang, An
%A Xie, Ruobing
%A Li, Shuaipeng
%A Sun, Xingwu
%A Kang, Zhanhui
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Findings of the Association for Computational Linguistics: EMNLP 2025
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-335-7
%F wang-etal-2025-sparsifying
%X The Transformer architecture has long dominated the development of large language models, but its quadratic complexity in sequence length presents scalability challenges. Recent advances in State Space Models, particularly the Mamba series, offer a promising alternative with linear-time inference and competitive performance. While scaling model capacity via sparsification, exemplified by Mixture-of-Experts, has proven effective in reducing computation while expanding knowledge capacity, the integration of sparsification with Mamba remains largely unexplored. Existing attempts typically apply naive block-level stacking, failing to leverage Mamba’s internal structure for fine-grained sparsification. In this work, we mainly explore how to sparsify the parameters inside Mamba. We find that sparsification strategies applied to parameters governing different mechanisms inside Mamba have significantly different effects. Our proposed Mamba-MoZ framework introduces a flexible and effective sparsification mechanism inside Mamba, which achieves parameter scalability independently and delivers stronger performance.
%R 10.18653/v1/2025.findings-emnlp.959
%U https://aclanthology.org/2025.findings-emnlp.959/
%U https://doi.org/10.18653/v1/2025.findings-emnlp.959
%P 17661-17667
Markdown (Informal)
[Sparsifying Mamba](https://aclanthology.org/2025.findings-emnlp.959/) (Wang et al., Findings 2025)
ACL
An Wang, Ruobing Xie, Shuaipeng Li, Xingwu Sun, and Zhanhui Kang. 2025. Sparsifying Mamba. In Findings of the Association for Computational Linguistics: EMNLP 2025, pages 17661–17667, Suzhou, China. Association for Computational Linguistics.