BibTeX
@inproceedings{hanna-mueller-2025-incremental,
title = "Incremental Sentence Processing Mechanisms in Autoregressive Transformer Language Models",
author = "Hanna, Michael and
Mueller, Aaron",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.naacl-long.164/",
doi = "10.18653/v1/2025.naacl-long.164",
pages = "3181--3203",
ISBN = "979-8-89176-189-6",
abstract = "Autoregressive transformer language models (LMs) possess strong syntactic abilities, often successfully handling phenomena from agreement to NPI licensing. However, the features they use to incrementally process their linguistic input are not well understood. In this paper, we fill this gap by studying the mechanisms underlying garden path sentence processing in LMs. Specifically, we ask: (1) Do LMs use syntactic features or shallow heuristics to perform incremental sentence processing? (2) Do LMs represent only one potential interpretation, or multiple? and (3) Do LMs reanalyze or repair their initial incorrect representations? To address these questions, we use sparse autoencoders to identify interpretable features that determine which continuation{---}and thus which reading{---}of a garden path sentence the LM prefers. We find that while many important features relate to syntactic structure, some reflect syntactically irrelevant heuristics. Moreover, though most active features correspond to one reading of the sentence, some features correspond to the other, suggesting that LMs assign weight to both possibilities. Finally, LMs fail to re-use features to answer follow-up questions."
}

MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="hanna-mueller-2025-incremental">
    <titleInfo>
      <title>Incremental Sentence Processing Mechanisms in Autoregressive Transformer Language Models</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Michael</namePart>
      <namePart type="family">Hanna</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Aaron</namePart>
      <namePart type="family">Mueller</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2025-04</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Luis</namePart>
        <namePart type="family">Chiruzzo</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Alan</namePart>
        <namePart type="family">Ritter</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Lu</namePart>
        <namePart type="family">Wang</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Albuquerque, New Mexico</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
      <identifier type="isbn">979-8-89176-189-6</identifier>
    </relatedItem>
    <abstract>Autoregressive transformer language models (LMs) possess strong syntactic abilities, often successfully handling phenomena from agreement to NPI licensing. However, the features they use to incrementally process their linguistic input are not well understood. In this paper, we fill this gap by studying the mechanisms underlying garden path sentence processing in LMs. Specifically, we ask: (1) Do LMs use syntactic features or shallow heuristics to perform incremental sentence processing? (2) Do LMs represent only one potential interpretation, or multiple? and (3) Do LMs reanalyze or repair their initial incorrect representations? To address these questions, we use sparse autoencoders to identify interpretable features that determine which continuation—and thus which reading—of a garden path sentence the LM prefers. We find that while many important features relate to syntactic structure, some reflect syntactically irrelevant heuristics. Moreover, though most active features correspond to one reading of the sentence, some features correspond to the other, suggesting that LMs assign weight to both possibilities. Finally, LMs fail to re-use features to answer follow-up questions.</abstract>
    <identifier type="citekey">hanna-mueller-2025-incremental</identifier>
    <identifier type="doi">10.18653/v1/2025.naacl-long.164</identifier>
    <location>
      <url>https://aclanthology.org/2025.naacl-long.164/</url>
    </location>
    <part>
      <date>2025-04</date>
      <extent unit="page">
        <start>3181</start>
        <end>3203</end>
      </extent>
    </part>
  </mods>
</modsCollection>

Endnote
%0 Conference Proceedings
%T Incremental Sentence Processing Mechanisms in Autoregressive Transformer Language Models
%A Hanna, Michael
%A Mueller, Aaron
%Y Chiruzzo, Luis
%Y Ritter, Alan
%Y Wang, Lu
%S Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)
%D 2025
%8 April
%I Association for Computational Linguistics
%C Albuquerque, New Mexico
%@ 979-8-89176-189-6
%F hanna-mueller-2025-incremental
%X Autoregressive transformer language models (LMs) possess strong syntactic abilities, often successfully handling phenomena from agreement to NPI licensing. However, the features they use to incrementally process their linguistic input are not well understood. In this paper, we fill this gap by studying the mechanisms underlying garden path sentence processing in LMs. Specifically, we ask: (1) Do LMs use syntactic features or shallow heuristics to perform incremental sentence processing? (2) Do LMs represent only one potential interpretation, or multiple? and (3) Do LMs reanalyze or repair their initial incorrect representations? To address these questions, we use sparse autoencoders to identify interpretable features that determine which continuation—and thus which reading—of a garden path sentence the LM prefers. We find that while many important features relate to syntactic structure, some reflect syntactically irrelevant heuristics. Moreover, though most active features correspond to one reading of the sentence, some features correspond to the other, suggesting that LMs assign weight to both possibilities. Finally, LMs fail to re-use features to answer follow-up questions.
%R 10.18653/v1/2025.naacl-long.164
%U https://aclanthology.org/2025.naacl-long.164/
%U https://doi.org/10.18653/v1/2025.naacl-long.164
%P 3181-3203

Markdown (Informal)
[Incremental Sentence Processing Mechanisms in Autoregressive Transformer Language Models](https://aclanthology.org/2025.naacl-long.164/) (Hanna & Mueller, NAACL 2025)

ACL
Michael Hanna and Aaron Mueller. 2025. Incremental Sentence Processing Mechanisms in Autoregressive Transformer Language Models. In Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 3181–3203, Albuquerque, New Mexico. Association for Computational Linguistics.