@inproceedings{he-etal-2025-a2ats,
title = "{A}$^2${ATS}: Retrieval-Based {KV} Cache Reduction via Windowed Rotary Position Embedding and Query-Aware Vector Quantization",
author = "He, Junhui and
Xing, Junna and
Wang, Nan and
Xu, Rui and
Wu, Shangyu and
Zhou, Peng and
Liu, Qiang and
Xue, Chun Jason and
Li, Qingan",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-acl.644/",
doi = "10.18653/v1/2025.findings-acl.644",
pages = "12451--12463",
ISBN = "979-8-89176-256-5",
abstract = "Long context large language models (LLMs) pose significant challenges for efficient serving due to the large memory footprint and high access overhead of KV cache.Retrieval-based KV cache reduction methods can mitigate these challenges, typically by offloading the complete KV cache to CPU and retrieving necessary tokens on demand during inference.However, these methods still suffer from unsatisfactory accuracy degradation and extra retrieval overhead.To address these limitations, this paper proposes A$^2$ATS, a novel retrieval-based KV cache reduction method.A$^2$ATS aims to obtain an accurate approximation of attention scores by applying the vector quantization technique to key states, thereby enabling efficient and precise retrieval of the top-K tokens.First, we propose Windowed Rotary Position Embedding, which decouples the positional dependency from query and key states after position embedding.Then, we propose query-aware vector quantization that optimizes the objective of attention score approximation directly.Finally, we design the heterogeneous inference architecture for KV cache offloading, enabling long context serving with larger batch sizes.Experimental results demonstrate that A$^2$ATS can achieve a lower performance degradation with similar or lower overhead compared to existing methods, thereby increasing long context serving throughput by up to $2.7 \times$."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="he-etal-2025-a2ats">
<titleInfo>
<title>A²ATS: Retrieval-Based KV Cache Reduction via Windowed Rotary Position Embedding and Query-Aware Vector Quantization</title>
</titleInfo>
<name type="personal">
<namePart type="given">Junhui</namePart>
<namePart type="family">He</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Junna</namePart>
<namePart type="family">Xing</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nan</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rui</namePart>
<namePart type="family">Xu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shangyu</namePart>
<namePart type="family">Wu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Peng</namePart>
<namePart type="family">Zhou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Qiang</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chun</namePart>
<namePart type="given">Jason</namePart>
<namePart type="family">Xue</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Qingan</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: ACL 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Wanxiang</namePart>
<namePart type="family">Che</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Joyce</namePart>
<namePart type="family">Nabende</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ekaterina</namePart>
<namePart type="family">Shutova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohammad</namePart>
<namePart type="given">Taher</namePart>
<namePart type="family">Pilehvar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-256-5</identifier>
</relatedItem>
<abstract>Long context large language models (LLMs) pose significant challenges for efficient serving due to the large memory footprint and high access overhead of KV cache. Retrieval-based KV cache reduction methods can mitigate these challenges, typically by offloading the complete KV cache to CPU and retrieving necessary tokens on demand during inference. However, these methods still suffer from unsatisfactory accuracy degradation and extra retrieval overhead. To address these limitations, this paper proposes A²ATS, a novel retrieval-based KV cache reduction method. A²ATS aims to obtain an accurate approximation of attention scores by applying the vector quantization technique to key states, thereby enabling efficient and precise retrieval of the top-K tokens. First, we propose Windowed Rotary Position Embedding, which decouples the positional dependency from query and key states after position embedding. Then, we propose query-aware vector quantization that optimizes the objective of attention score approximation directly. Finally, we design the heterogeneous inference architecture for KV cache offloading, enabling long context serving with larger batch sizes. Experimental results demonstrate that A²ATS can achieve a lower performance degradation with similar or lower overhead compared to existing methods, thereby increasing long context serving throughput by up to 2.7×.</abstract>
<identifier type="citekey">he-etal-2025-a2ats</identifier>
<identifier type="doi">10.18653/v1/2025.findings-acl.644</identifier>
<location>
<url>https://aclanthology.org/2025.findings-acl.644/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>12451</start>
<end>12463</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T A²ATS: Retrieval-Based KV Cache Reduction via Windowed Rotary Position Embedding and Query-Aware Vector Quantization
%A He, Junhui
%A Xing, Junna
%A Wang, Nan
%A Xu, Rui
%A Wu, Shangyu
%A Zhou, Peng
%A Liu, Qiang
%A Xue, Chun Jason
%A Li, Qingan
%Y Che, Wanxiang
%Y Nabende, Joyce
%Y Shutova, Ekaterina
%Y Pilehvar, Mohammad Taher
%S Findings of the Association for Computational Linguistics: ACL 2025
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-256-5
%F he-etal-2025-a2ats
%X Long context large language models (LLMs) pose significant challenges for efficient serving due to the large memory footprint and high access overhead of KV cache. Retrieval-based KV cache reduction methods can mitigate these challenges, typically by offloading the complete KV cache to CPU and retrieving necessary tokens on demand during inference. However, these methods still suffer from unsatisfactory accuracy degradation and extra retrieval overhead. To address these limitations, this paper proposes A²ATS, a novel retrieval-based KV cache reduction method. A²ATS aims to obtain an accurate approximation of attention scores by applying the vector quantization technique to key states, thereby enabling efficient and precise retrieval of the top-K tokens. First, we propose Windowed Rotary Position Embedding, which decouples the positional dependency from query and key states after position embedding. Then, we propose query-aware vector quantization that optimizes the objective of attention score approximation directly. Finally, we design the heterogeneous inference architecture for KV cache offloading, enabling long context serving with larger batch sizes. Experimental results demonstrate that A²ATS can achieve a lower performance degradation with similar or lower overhead compared to existing methods, thereby increasing long context serving throughput by up to 2.7×.
%R 10.18653/v1/2025.findings-acl.644
%U https://aclanthology.org/2025.findings-acl.644/
%U https://doi.org/10.18653/v1/2025.findings-acl.644
%P 12451-12463
Markdown (Informal)
[A²ATS: Retrieval-Based KV Cache Reduction via Windowed Rotary Position Embedding and Query-Aware Vector Quantization](https://aclanthology.org/2025.findings-acl.644/) (He et al., Findings 2025)
ACL
- Junhui He, Junna Xing, Nan Wang, Rui Xu, Shangyu Wu, Peng Zhou, Qiang Liu, Chun Jason Xue, and Qingan Li. 2025. A²ATS: Retrieval-Based KV Cache Reduction via Windowed Rotary Position Embedding and Query-Aware Vector Quantization. In Findings of the Association for Computational Linguistics: ACL 2025, pages 12451–12463, Vienna, Austria. Association for Computational Linguistics.
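
For readers who want a concrete picture of the retrieval idea summarized in the abstract, the sketch below illustrates the generic pattern only: vector-quantize cached key states with a small codebook, score the codebook centroids against the current query, and use those approximate scores to pick the top-K tokens to fetch from offloaded KV cache. It is not the authors' A²ATS implementation (which additionally uses Windowed Rotary Position Embedding and a query-aware quantization objective); all names and parameters here (build_codebook, approx_topk, codebook_size, top_k) are illustrative assumptions.

```python
# Minimal, illustrative sketch of retrieval via vector-quantized key states.
# Not the A²ATS algorithm: plain k-means stands in for the paper's
# query-aware quantization objective, and positional handling is omitted.
import numpy as np

def build_codebook(keys: np.ndarray, codebook_size: int = 16, iters: int = 10) -> np.ndarray:
    """Cluster cached key vectors with plain k-means to obtain a small codebook."""
    rng = np.random.default_rng(0)
    centroids = keys[rng.choice(len(keys), codebook_size, replace=False)]
    for _ in range(iters):
        # Assign each key to its nearest centroid, then recompute centroids.
        dists = ((keys[:, None, :] - centroids[None, :, :]) ** 2).sum(-1)
        assign = dists.argmin(1)
        for c in range(codebook_size):
            members = keys[assign == c]
            if len(members):
                centroids[c] = members.mean(0)
    return centroids

def approx_topk(query: np.ndarray, keys: np.ndarray, centroids: np.ndarray, top_k: int = 8) -> np.ndarray:
    """Approximate q·k per token via its centroid and return indices of the top-K tokens."""
    codes = ((keys[:, None, :] - centroids[None, :, :]) ** 2).sum(-1).argmin(1)
    centroid_scores = centroids @ query      # one dot product per centroid, not per token
    token_scores = centroid_scores[codes]    # broadcast centroid scores to all tokens
    return np.argsort(token_scores)[-top_k:][::-1]

if __name__ == "__main__":
    rng = np.random.default_rng(1)
    keys = rng.normal(size=(1024, 64)).astype(np.float32)   # cached key states (offloaded)
    query = rng.normal(size=64).astype(np.float32)           # current decoding query state
    centroids = build_codebook(keys)
    print("tokens to retrieve:", approx_topk(query, keys, centroids))
```

The point of the sketch is the cost structure: scoring touches only the small codebook rather than every cached key, which is what makes on-demand retrieval of a few tokens from CPU-offloaded KV cache attractive in the first place.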