BibTeX
@inproceedings{tayyar-madabushi-bonial-2025-construction,
title = "Construction Grammar Evidence for How {LLM}s Use Context-Directed Extrapolation to Solve Tasks",
author = "Tayyar Madabushi, Harish and
Bonial, Claire",
editor = "Bonial, Claire and
Torgbi, Melissa and
Weissweiler, Leonie and
Blodgett, Austin and
Beuls, Katrien and
Van Eecke, Paul and
Tayyar Madabushi, Harish",
booktitle = "Proceedings of the Second International Workshop on Construction Grammars and NLP",
month = sep,
year = "2025",
    address = "D{\"u}sseldorf, Germany",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.cxgsnlp-1.20/",
pages = "190--201",
ISBN = "979-8-89176-318-0",
abstract = "In this paper, we apply the lens of Construction Grammar to provide linguistically-grounded evidence for the recently introduced view of LLMs that moves beyond the ``stochastic parrot'' and ``emergent Artificial General Intelligence'' extremes. We provide further evidence, this time rooted in linguistic theory, that the capabilities of LLMs are best explained by a process of context-directed extrapolation from their training priors. This mechanism, guided by in-context examples in base models or the prompt in instruction-tuned models, clarifies how LLM performance can exceed stochastic parroting without achieving the scalable, general-purpose reasoning seen in humans. Construction Grammar is uniquely suited to this investigation, as it provides a precise framework for testing the boundary between true generalization and sophisticated pattern-matching on novel linguistic tasks. The ramifications of this framework explaining LLM performance are three-fold: first, there is explanatory power providing insights into seemingly idiosyncratic LLM weaknesses and strengths; second, there are empowering methods for LLM users to improve performance of smaller models in post-training; third, there is a need to shift LLM evaluation paradigms so that LLMs are assessed relative to the prevalence of relevant priors in training data, and Construction Grammar provides a framework to create such evaluation data."
}

MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="tayyar-madabushi-bonial-2025-construction">
<titleInfo>
<title>Construction Grammar Evidence for How LLMs Use Context-Directed Extrapolation to Solve Tasks</title>
</titleInfo>
<name type="personal">
<namePart type="given">Harish</namePart>
<namePart type="family">Tayyar Madabushi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Claire</namePart>
<namePart type="family">Bonial</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Second International Workshop on Construction Grammars and NLP</title>
</titleInfo>
<name type="personal">
<namePart type="given">Claire</namePart>
<namePart type="family">Bonial</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Melissa</namePart>
<namePart type="family">Torgbi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Leonie</namePart>
<namePart type="family">Weissweiler</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Austin</namePart>
<namePart type="family">Blodgett</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Katrien</namePart>
<namePart type="family">Beuls</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Paul</namePart>
<namePart type="family">Van Eecke</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Harish</namePart>
<namePart type="family">Tayyar Madabushi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Düsseldorf, Germany</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-318-0</identifier>
</relatedItem>
<abstract>In this paper, we apply the lens of Construction Grammar to provide linguistically-grounded evidence for the recently introduced view of LLMs that moves beyond the “stochastic parrot” and “emergent Artificial General Intelligence” extremes. We provide further evidence, this time rooted in linguistic theory, that the capabilities of LLMs are best explained by a process of context-directed extrapolation from their training priors. This mechanism, guided by in-context examples in base models or the prompt in instruction-tuned models, clarifies how LLM performance can exceed stochastic parroting without achieving the scalable, general-purpose reasoning seen in humans. Construction Grammar is uniquely suited to this investigation, as it provides a precise framework for testing the boundary between true generalization and sophisticated pattern-matching on novel linguistic tasks. The ramifications of this framework explaining LLM performance are three-fold: first, there is explanatory power providing insights into seemingly idiosyncratic LLM weaknesses and strengths; second, there are empowering methods for LLM users to improve performance of smaller models in post-training; third, there is a need to shift LLM evaluation paradigms so that LLMs are assessed relative to the prevalence of relevant priors in training data, and Construction Grammar provides a framework to create such evaluation data.</abstract>
<identifier type="citekey">tayyar-madabushi-bonial-2025-construction</identifier>
<location>
<url>https://aclanthology.org/2025.cxgsnlp-1.20/</url>
</location>
<part>
<date>2025-09</date>
<extent unit="page">
<start>190</start>
<end>201</end>
</extent>
</part>
</mods>
</modsCollection>

Endnote
%0 Conference Proceedings
%T Construction Grammar Evidence for How LLMs Use Context-Directed Extrapolation to Solve Tasks
%A Tayyar Madabushi, Harish
%A Bonial, Claire
%Y Bonial, Claire
%Y Torgbi, Melissa
%Y Weissweiler, Leonie
%Y Blodgett, Austin
%Y Beuls, Katrien
%Y Van Eecke, Paul
%Y Tayyar Madabushi, Harish
%S Proceedings of the Second International Workshop on Construction Grammars and NLP
%D 2025
%8 September
%I Association for Computational Linguistics
%C Düsseldorf, Germany
%@ 979-8-89176-318-0
%F tayyar-madabushi-bonial-2025-construction
%X In this paper, we apply the lens of Construction Grammar to provide linguistically-grounded evidence for the recently introduced view of LLMs that moves beyond the “stochastic parrot” and “emergent Artificial General Intelligence” extremes. We provide further evidence, this time rooted in linguistic theory, that the capabilities of LLMs are best explained by a process of context-directed extrapolation from their training priors. This mechanism, guided by in-context examples in base models or the prompt in instruction-tuned models, clarifies how LLM performance can exceed stochastic parroting without achieving the scalable, general-purpose reasoning seen in humans. Construction Grammar is uniquely suited to this investigation, as it provides a precise framework for testing the boundary between true generalization and sophisticated pattern-matching on novel linguistic tasks. The ramifications of this framework explaining LLM performance are three-fold: first, there is explanatory power providing insights into seemingly idiosyncratic LLM weaknesses and strengths; second, there are empowering methods for LLM users to improve performance of smaller models in post-training; third, there is a need to shift LLM evaluation paradigms so that LLMs are assessed relative to the prevalence of relevant priors in training data, and Construction Grammar provides a framework to create such evaluation data.
%U https://aclanthology.org/2025.cxgsnlp-1.20/
%P 190-201

Markdown (Informal)
[Construction Grammar Evidence for How LLMs Use Context-Directed Extrapolation to Solve Tasks](https://aclanthology.org/2025.cxgsnlp-1.20/) (Tayyar Madabushi & Bonial, CxGsNLP 2025)

ACL
Harish Tayyar Madabushi and Claire Bonial. 2025. Construction Grammar Evidence for How LLMs Use Context-Directed Extrapolation to Solve Tasks. In Proceedings of the Second International Workshop on Construction Grammars and NLP, pages 190–201, Düsseldorf, Germany. Association for Computational Linguistics.