@inproceedings{hayakawa-sato-2025-theoretical,
title = "Theoretical Analysis of Hierarchical Language Recognition and Generation by Transformers without Positional Encoding",
author = "Hayakawa, Daichi and
Sato, Issei",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.acl-long.1488/",
doi = "10.18653/v1/2025.acl-long.1488",
pages = "30777--30834",
ISBN = "979-8-89176-251-0",
abstract = "In this study, we provide constructive proof that Transformers can recognize and generate hierarchical language efficiently with respect to model size, even without the need for a specific positional encoding.Specifically, we show that causal masking and a starting token enable Transformers to compute positional information and depth within hierarchical structures.We demonstrate that Transformers without positional encoding can generate hierarchical languages. Furthermore, we suggest that explicit positional encoding might have a detrimental effect on generalization with respect to sequence length."
}

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="hayakawa-sato-2025-theoretical">
<titleInfo>
<title>Theoretical Analysis of Hierarchical Language Recognition and Generation by Transformers without Positional Encoding</title>
</titleInfo>
<name type="personal">
<namePart type="given">Daichi</namePart>
<namePart type="family">Hayakawa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Issei</namePart>
<namePart type="family">Sato</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Wanxiang</namePart>
<namePart type="family">Che</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Joyce</namePart>
<namePart type="family">Nabende</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ekaterina</namePart>
<namePart type="family">Shutova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohammad</namePart>
<namePart type="given">Taher</namePart>
<namePart type="family">Pilehvar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-251-0</identifier>
</relatedItem>
<abstract>In this study, we provide constructive proof that Transformers can recognize and generate hierarchical language efficiently with respect to model size, even without the need for a specific positional encoding. Specifically, we show that causal masking and a starting token enable Transformers to compute positional information and depth within hierarchical structures. We demonstrate that Transformers without positional encoding can generate hierarchical languages. Furthermore, we suggest that explicit positional encoding might have a detrimental effect on generalization with respect to sequence length.</abstract>
<identifier type="citekey">hayakawa-sato-2025-theoretical</identifier>
<identifier type="doi">10.18653/v1/2025.acl-long.1488</identifier>
<location>
<url>https://aclanthology.org/2025.acl-long.1488/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>30777</start>
<end>30834</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Theoretical Analysis of Hierarchical Language Recognition and Generation by Transformers without Positional Encoding
%A Hayakawa, Daichi
%A Sato, Issei
%Y Che, Wanxiang
%Y Nabende, Joyce
%Y Shutova, Ekaterina
%Y Pilehvar, Mohammad Taher
%S Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-251-0
%F hayakawa-sato-2025-theoretical
%X In this study, we provide constructive proof that Transformers can recognize and generate hierarchical language efficiently with respect to model size, even without the need for a specific positional encoding. Specifically, we show that causal masking and a starting token enable Transformers to compute positional information and depth within hierarchical structures. We demonstrate that Transformers without positional encoding can generate hierarchical languages. Furthermore, we suggest that explicit positional encoding might have a detrimental effect on generalization with respect to sequence length.
%R 10.18653/v1/2025.acl-long.1488
%U https://aclanthology.org/2025.acl-long.1488/
%U https://doi.org/10.18653/v1/2025.acl-long.1488
%P 30777-30834
Markdown (Informal)
[Theoretical Analysis of Hierarchical Language Recognition and Generation by Transformers without Positional Encoding](https://aclanthology.org/2025.acl-long.1488/) (Hayakawa & Sato, ACL 2025)
ACL
Daichi Hayakawa and Issei Sato. 2025. [Theoretical Analysis of Hierarchical Language Recognition and Generation by Transformers without Positional Encoding](https://aclanthology.org/2025.acl-long.1488/). In *Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)*, pages 30777–30834, Vienna, Austria. Association for Computational Linguistics.