@inproceedings{hussain-etal-2025-rebuttal,
title = "A rebuttal of two common deflationary stances against {LLM} cognition",
author = "Hussain, Zak and
Mata, Rui and
Wulff, Dirk U.",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-acl.1242/",
doi = "10.18653/v1/2025.findings-acl.1242",
pages = "24208--24213",
ISBN = "979-8-89176-256-5",
abstract = "Large language models (LLMs) are arguably the most predictive models of human cognition available. Despite their impressive human-alignment, LLMs are often labeled as ``*just* next-token predictors'' that purportedly fall short of genuine cognition. We argue that these deflationary claims need further justification. Drawing on prominent cognitive and artificial intelligence research, we critically evaluate two forms of ``Justaism'' that dismiss LLM cognition by labeling LLMs as ``just'' simplistic entities without specifying or substantiating the critical capacities these models supposedly lack. Our analysis highlights the need for a more measured discussion of LLM cognition, to better inform future research and the development of artificial intelligence."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="hussain-etal-2025-rebuttal">
<titleInfo>
<title>A rebuttal of two common deflationary stances against LLM cognition</title>
</titleInfo>
<name type="personal">
<namePart type="given">Zak</namePart>
<namePart type="family">Hussain</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rui</namePart>
<namePart type="family">Mata</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dirk</namePart>
<namePart type="given">U</namePart>
<namePart type="family">Wulff</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: ACL 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Wanxiang</namePart>
<namePart type="family">Che</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Joyce</namePart>
<namePart type="family">Nabende</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ekaterina</namePart>
<namePart type="family">Shutova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohammad</namePart>
<namePart type="given">Taher</namePart>
<namePart type="family">Pilehvar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-256-5</identifier>
</relatedItem>
<abstract>Large language models (LLMs) are arguably the most predictive models of human cognition available. Despite their impressive human-alignment, LLMs are often labeled as “*just* next-token predictors” that purportedly fall short of genuine cognition. We argue that these deflationary claims need further justification. Drawing on prominent cognitive and artificial intelligence research, we critically evaluate two forms of “Justaism” that dismiss LLM cognition by labeling LLMs as “just” simplistic entities without specifying or substantiating the critical capacities these models supposedly lack. Our analysis highlights the need for a more measured discussion of LLM cognition, to better inform future research and the development of artificial intelligence.</abstract>
<identifier type="citekey">hussain-etal-2025-rebuttal</identifier>
<identifier type="doi">10.18653/v1/2025.findings-acl.1242</identifier>
<location>
<url>https://aclanthology.org/2025.findings-acl.1242/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>24208</start>
<end>24213</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T A rebuttal of two common deflationary stances against LLM cognition
%A Hussain, Zak
%A Mata, Rui
%A Wulff, Dirk U.
%Y Che, Wanxiang
%Y Nabende, Joyce
%Y Shutova, Ekaterina
%Y Pilehvar, Mohammad Taher
%S Findings of the Association for Computational Linguistics: ACL 2025
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-256-5
%F hussain-etal-2025-rebuttal
%X Large language models (LLMs) are arguably the most predictive models of human cognition available. Despite their impressive human-alignment, LLMs are often labeled as “*just* next-token predictors” that purportedly fall short of genuine cognition. We argue that these deflationary claims need further justification. Drawing on prominent cognitive and artificial intelligence research, we critically evaluate two forms of “Justaism” that dismiss LLM cognition by labeling LLMs as “just” simplistic entities without specifying or substantiating the critical capacities these models supposedly lack. Our analysis highlights the need for a more measured discussion of LLM cognition, to better inform future research and the development of artificial intelligence.
%R 10.18653/v1/2025.findings-acl.1242
%U https://aclanthology.org/2025.findings-acl.1242/
%U https://doi.org/10.18653/v1/2025.findings-acl.1242
%P 24208-24213
Markdown (Informal)
[A rebuttal of two common deflationary stances against LLM cognition](https://aclanthology.org/2025.findings-acl.1242/) (Hussain et al., Findings 2025)
ACL
Zak Hussain, Rui Mata, and Dirk U. Wulff. 2025. A rebuttal of two common deflationary stances against LLM cognition. In Findings of the Association for Computational Linguistics: ACL 2025, pages 24208–24213, Vienna, Austria. Association for Computational Linguistics.