@inproceedings{corallo-etal-2025-tablekv,
title = "{T}able{KV}: {KV} Cache Compression for In-Context Table Processing",
author = "Corallo, Giulio and
Faure-Rolland, Elia and
Lamari, Miriam and
Papotti, Paolo",
editor = "Chang, Shuaichen and
Hulsebos, Madelon and
Liu, Qian and
Chen, Wenhu and
Sun, Huan",
booktitle = "Proceedings of the 4th Table Representation Learning Workshop",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.trl-1.13/",
doi = "10.18653/v1/2025.trl-1.13",
pages = "166--171",
ISBN = "979-8-89176-268-8",
abstract = "Processing large tables provided in-context to LLMs is challenging due to token limits and information overload. While Retrieval-Augmented Generation can select relevant subsets externally, this work explores Key-Value (KV) cache compression as an alternative, applied directly to the linearized table during inference. We show that the LLM{'}s internal attention scores over the table context guides the retention of essential KV pairs, effectively compressing the processing context while preserving crucial relational information needed for complex queries. Experiments on Spider, WikitableQA, and QTSumm datasets validate the compression approach for in-context table processing, offering a promising path for improved table representation learning in LLMs."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="corallo-etal-2025-tablekv">
<titleInfo>
<title>TableKV: KV Cache Compression for In-Context Table Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Giulio</namePart>
<namePart type="family">Corallo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Elia</namePart>
<namePart type="family">Faure-Rolland</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Miriam</namePart>
<namePart type="family">Lamari</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Paolo</namePart>
<namePart type="family">Papotti</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 4th Table Representation Learning Workshop</title>
</titleInfo>
<name type="personal">
<namePart type="given">Shuaichen</namePart>
<namePart type="family">Chang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Madelon</namePart>
<namePart type="family">Hulsebos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Qian</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wenhu</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Huan</namePart>
<namePart type="family">Sun</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-268-8</identifier>
</relatedItem>
<abstract>Processing large tables provided in-context to LLMs is challenging due to token limits and information overload. While Retrieval-Augmented Generation can select relevant subsets externally, this work explores Key-Value (KV) cache compression as an alternative, applied directly to the linearized table during inference. We show that the LLM’s internal attention scores over the table context guides the retention of essential KV pairs, effectively compressing the processing context while preserving crucial relational information needed for complex queries. Experiments on Spider, WikitableQA, and QTSumm datasets validate the compression approach for in-context table processing, offering a promising path for improved table representation learning in LLMs.</abstract>
<identifier type="citekey">corallo-etal-2025-tablekv</identifier>
<identifier type="doi">10.18653/v1/2025.trl-1.13</identifier>
<location>
<url>https://aclanthology.org/2025.trl-1.13/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>166</start>
<end>171</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T TableKV: KV Cache Compression for In-Context Table Processing
%A Corallo, Giulio
%A Faure-Rolland, Elia
%A Lamari, Miriam
%A Papotti, Paolo
%Y Chang, Shuaichen
%Y Hulsebos, Madelon
%Y Liu, Qian
%Y Chen, Wenhu
%Y Sun, Huan
%S Proceedings of the 4th Table Representation Learning Workshop
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-268-8
%F corallo-etal-2025-tablekv
%X Processing large tables provided in-context to LLMs is challenging due to token limits and information overload. While Retrieval-Augmented Generation can select relevant subsets externally, this work explores Key-Value (KV) cache compression as an alternative, applied directly to the linearized table during inference. We show that the LLM’s internal attention scores over the table context guide the retention of essential KV pairs, effectively compressing the processing context while preserving crucial relational information needed for complex queries. Experiments on Spider, WikitableQA, and QTSumm datasets validate the compression approach for in-context table processing, offering a promising path for improved table representation learning in LLMs.
%R 10.18653/v1/2025.trl-1.13
%U https://aclanthology.org/2025.trl-1.13/
%U https://doi.org/10.18653/v1/2025.trl-1.13
%P 166-171
Markdown (Informal)
[TableKV: KV Cache Compression for In-Context Table Processing](https://aclanthology.org/2025.trl-1.13/) (Corallo et al., TRL 2025)
ACL
Giulio Corallo, Elia Faure-Rolland, Miriam Lamari, and Paolo Papotti. 2025. TableKV: KV Cache Compression for In-Context Table Processing. In Proceedings of the 4th Table Representation Learning Workshop, pages 166–171, Vienna, Austria. Association for Computational Linguistics.
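
The abstract describes retaining the KV pairs that receive the most of the model's own attention over the linearized table. Purely as a minimal illustrative sketch of that general idea (not the paper's implementation): the function `prune_table_kv`, its tensor shapes, and the sum-over-heads scoring rule below are all assumptions for illustration.

```python
# Hypothetical sketch of attention-guided KV cache pruning for a
# linearized table. Names and shapes are assumptions, not TableKV's API.
import torch

def prune_table_kv(keys, values, attn, keep_ratio=0.25):
    """Keep the table KV pairs that receive the most attention.

    keys, values: (seq_len, num_heads, head_dim) cached tensors for the
        linearized table tokens.
    attn: (num_heads, query_len, seq_len) attention weights from the
        model's forward pass over the table context.
    keep_ratio: fraction of table positions to retain.
    """
    # Total attention mass each table position receives, aggregated
    # over heads and query positions.
    scores = attn.sum(dim=(0, 1))                        # (seq_len,)
    k = max(1, int(keep_ratio * scores.numel()))
    keep = torch.topk(scores, k).indices.sort().values   # keep original order
    return keys[keep], values[keep]

if __name__ == "__main__":
    seq_len, n_heads, d = 128, 8, 64
    keys = torch.randn(seq_len, n_heads, d)
    values = torch.randn(seq_len, n_heads, d)
    attn = torch.rand(n_heads, 4, seq_len).softmax(dim=-1)
    k_small, v_small = prune_table_kv(keys, values, attn)
    print(k_small.shape)  # torch.Size([32, 8, 64]): compressed cache
```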