BibTeX
@article{hao-etal-2022-formal,
title = "Formal Language Recognition by Hard Attention Transformers: Perspectives from Circuit Complexity",
author = "Hao, Yiding and
Angluin, Dana and
Frank, Robert",
editor = "Roark, Brian and
Nenkova, Ani",
journal = "Transactions of the Association for Computational Linguistics",
volume = "10",
year = "2022",
address = "Cambridge, MA",
publisher = "MIT Press",
url = "https://aclanthology.org/2022.tacl-1.46",
doi = "10.1162/tacl_a_00490",
pages = "800--810",
abstract = "This paper analyzes three formal models of Transformer encoders that differ in the form of their self-attention mechanism: unique hard attention (UHAT); generalized unique hard attention (GUHAT), which generalizes UHAT; and averaging hard attention (AHAT). We show that UHAT and GUHAT Transformers, viewed as string acceptors, can only recognize formal languages in the complexity class AC0, the class of languages recognizable by families of Boolean circuits of constant depth and polynomial size. This upper bound subsumes Hahn{'}s (2020) results that GUHAT cannot recognize the DYCK languages or the PARITY language, since those languages are outside AC0 (Furst et al., 1984). In contrast, the non-AC0 languages MAJORITY and DYCK-1 are recognizable by AHAT networks, implying that AHAT can recognize languages that UHAT and GUHAT cannot.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="hao-etal-2022-formal">
<titleInfo>
<title>Formal Language Recognition by Hard Attention Transformers: Perspectives from Circuit Complexity</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yiding</namePart>
<namePart type="family">Hao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dana</namePart>
<namePart type="family">Angluin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Robert</namePart>
<namePart type="family">Frank</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<genre authority="bibutilsgt">journal article</genre>
<relatedItem type="host">
<titleInfo>
<title>Transactions of the Association for Computational Linguistics</title>
</titleInfo>
<originInfo>
<issuance>continuing</issuance>
<publisher>MIT Press</publisher>
<place>
<placeTerm type="text">Cambridge, MA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">periodical</genre>
<genre authority="bibutilsgt">academic journal</genre>
</relatedItem>
<abstract>This paper analyzes three formal models of Transformer encoders that differ in the form of their self-attention mechanism: unique hard attention (UHAT); generalized unique hard attention (GUHAT), which generalizes UHAT; and averaging hard attention (AHAT). We show that UHAT and GUHAT Transformers, viewed as string acceptors, can only recognize formal languages in the complexity class AC0, the class of languages recognizable by families of Boolean circuits of constant depth and polynomial size. This upper bound subsumes Hahn’s (2020) results that GUHAT cannot recognize the DYCK languages or the PARITY language, since those languages are outside AC0 (Furst et al., 1984). In contrast, the non-AC0 languages MAJORITY and DYCK-1 are recognizable by AHAT networks, implying that AHAT can recognize languages that UHAT and GUHAT cannot.</abstract>
<identifier type="citekey">hao-etal-2022-formal</identifier>
<identifier type="doi">10.1162/tacl_a_00490</identifier>
<location>
<url>https://aclanthology.org/2022.tacl-1.46</url>
</location>
<part>
<date>2022</date>
<detail type="volume"><number>10</number></detail>
<extent unit="page">
<start>800</start>
<end>810</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Journal Article
%T Formal Language Recognition by Hard Attention Transformers: Perspectives from Circuit Complexity
%A Hao, Yiding
%A Angluin, Dana
%A Frank, Robert
%J Transactions of the Association for Computational Linguistics
%D 2022
%V 10
%I MIT Press
%C Cambridge, MA
%F hao-etal-2022-formal
%X This paper analyzes three formal models of Transformer encoders that differ in the form of their self-attention mechanism: unique hard attention (UHAT); generalized unique hard attention (GUHAT), which generalizes UHAT; and averaging hard attention (AHAT). We show that UHAT and GUHAT Transformers, viewed as string acceptors, can only recognize formal languages in the complexity class AC0, the class of languages recognizable by families of Boolean circuits of constant depth and polynomial size. This upper bound subsumes Hahn’s (2020) results that GUHAT cannot recognize the DYCK languages or the PARITY language, since those languages are outside AC0 (Furst et al., 1984). In contrast, the non-AC0 languages MAJORITY and DYCK-1 are recognizable by AHAT networks, implying that AHAT can recognize languages that UHAT and GUHAT cannot.
%R 10.1162/tacl_a_00490
%U https://aclanthology.org/2022.tacl-1.46
%U https://doi.org/10.1162/tacl_a_00490
%P 800-810
Markdown (Informal)
[Formal Language Recognition by Hard Attention Transformers: Perspectives from Circuit Complexity](https://aclanthology.org/2022.tacl-1.46) (Hao et al., TACL 2022)
ACL
Yiding Hao, Dana Angluin, and Robert Frank. 2022. Formal Language Recognition by Hard Attention Transformers: Perspectives from Circuit Complexity. Transactions of the Association for Computational Linguistics, 10:800–810.