@inproceedings{srikumar-2025-arent,
title = "These Aren{'}t the Vectors You{'}re Looking For: A Proof of Quantum Advantage in Compositional Generalization",
author = "Srikumar, Karthik",
editor = "Pal, Santanu and
Pakray, Partha and
Jain, Priyanka and
Ekbal, Asif and
Bandyopadhyay, Sivaji",
booktitle = "Proceedings of the QuantumNLP{:} Integrating Quantum Computing with Natural Language Processing",
month = nov,
year = "2025",
address = "Mumbai, India (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.quantumnlp-1.2/",
pages = "6--9",
ISBN = "979-8-89176-306-7",
abstract = "Compositional generalization, the ability to systematically combine known concepts to understand and produce novel expressions, remains a fundamental, unsolved challenge for classical neural language models, whose reliance on statistical correlations in high-dimensional vector spaces inherently limits them. This paper establishes the first rigorous theoretical guarantee of an exponential quantum advantage for compositional generalization. We prove that classical language models, which represent concepts as vectors in $\mathbb{R}^d$, require a latent dimension scaling linearly with the number of concepts and compositional rules to avoid catastrophic interference. In contrast, we introduce the Quantum Compositional Embedding (QCE) framework, which leverages the intrinsic properties of quantum mechanics. In doing so, we demonstrate that QCE, utilizing only a logarithmic number of qubits, can perfectly represent and generalize compositional structures, a task provably impossible for classical models of equivalent dimensionality. The separation is proven to be exponential, providing a compelling theoretical foundation for quantum natural language processing."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="srikumar-2025-arent">
<titleInfo>
<title>These Aren’t the Vectors You’re Looking For: A Proof of Quantum Advantage in Compositional Generalization</title>
</titleInfo>
<name type="personal">
<namePart type="given">Karthik</namePart>
<namePart type="family">Srikumar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the QuantumNLP: Integrating Quantum Computing with Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Santanu</namePart>
<namePart type="family">Pal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Partha</namePart>
<namePart type="family">Pakray</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Priyanka</namePart>
<namePart type="family">Jain</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Asif</namePart>
<namePart type="family">Ekbal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sivaji</namePart>
<namePart type="family">Bandyopadhyay</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Mumbai, India (Hybrid)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-306-7</identifier>
</relatedItem>
<abstract>Compositional generalization, the ability to systematically combine known concepts to understand and produce novel expressions, remains a fundamental, unsolved challenge for classical neural language models, whose reliance on statistical correlations in high-dimensional vector spaces inherently limits them. This paper establishes the first rigorous theoretical guarantee of an exponential quantum advantage for compositional generalization. We prove that classical language models, which represent concepts as vectors in \mathbb{R}^d, require a latent dimension scaling linearly with the number of concepts and compositional rules to avoid catastrophic interference. In contrast, we introduce the Quantum Compositional Embedding (QCE) framework, which leverages the intrinsic properties of quantum mechanics. In doing so, we demonstrate that QCE, utilizing only a logarithmic number of qubits, can perfectly represent and generalize compositional structures, a task provably impossible for classical models of equivalent dimensionality. The separation is proven to be exponential, providing a compelling theoretical foundation for quantum natural language processing.</abstract>
<identifier type="citekey">srikumar-2025-arent</identifier>
<location>
<url>https://aclanthology.org/2025.quantumnlp-1.2/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>6</start>
<end>9</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T These Aren’t the Vectors You’re Looking For: A Proof of Quantum Advantage in Compositional Generalization
%A Srikumar, Karthik
%Y Pal, Santanu
%Y Pakray, Partha
%Y Jain, Priyanka
%Y Ekbal, Asif
%Y Bandyopadhyay, Sivaji
%S Proceedings of the QuantumNLP: Integrating Quantum Computing with Natural Language Processing
%D 2025
%8 November
%I Association for Computational Linguistics
%C Mumbai, India (Hybrid)
%@ 979-8-89176-306-7
%F srikumar-2025-arent
%X Compositional generalization, the ability to systematically combine known concepts to understand and produce novel expressions, remains a fundamental, unsolved challenge for classical neural language models, whose reliance on statistical correlations in high-dimensional vector spaces inherently limits them. This paper establishes the first rigorous theoretical guarantee of an exponential quantum advantage for compositional generalization. We prove that classical language models, which represent concepts as vectors in \mathbb{R}^d, require a latent dimension scaling linearly with the number of concepts and compositional rules to avoid catastrophic interference. In contrast, we introduce the Quantum Compositional Embedding (QCE) framework, which leverages the intrinsic properties of quantum mechanics. In doing so, we demonstrate that QCE, utilizing only a logarithmic number of qubits, can perfectly represent and generalize compositional structures, a task provably impossible for classical models of equivalent dimensionality. The separation is proven to be exponential, providing a compelling theoretical foundation for quantum natural language processing.
%U https://aclanthology.org/2025.quantumnlp-1.2/
%P 6-9

Markdown (Informal)
[These Aren’t the Vectors You’re Looking For: A Proof of Quantum Advantage in Compositional Generalization](https://aclanthology.org/2025.quantumnlp-1.2/) (Srikumar, QuantumNLP 2025)
ACL