@inproceedings{merrill-2019-sequential,
    title = "Sequential Neural Networks as Automata",
    author = "Merrill, William",
    editor = "Eisner, Jason  and
      Gall{\'e}, Matthias  and
      Heinz, Jeffrey  and
      Quattoni, Ariadna  and
      Rabusseau, Guillaume",
    booktitle = "Proceedings of the Workshop on Deep Learning and Formal Languages: Building Bridges",
    month = aug,
    year = "2019",
    address = "Florence, Italy",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W19-3901",
    doi = "10.18653/v1/W19-3901",
    pages = "1--13",
    abstract = "This work attempts to explain the types of computation that neural networks can perform by relating them to automata. We first define what it means for a real-time network with bounded precision to accept a language. A measure of network memory follows from this definition. We then characterize the classes of languages acceptable by various recurrent networks, attention, and convolutional networks. We find that LSTMs function like counter machines and relate convolutional networks to the subregular hierarchy. Overall, this work attempts to increase our understanding and ability to interpret neural networks through the lens of theory. These theoretical insights help explain neural computation, as well as the relationship between neural networks and natural language grammar.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="merrill-2019-sequential">
    <titleInfo>
        <title>Sequential Neural Networks as Automata</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">William</namePart>
        <namePart type="family">Merrill</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2019-08</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the Workshop on Deep Learning and Formal Languages: Building Bridges</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Jason</namePart>
            <namePart type="family">Eisner</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Matthias</namePart>
            <namePart type="family">Gallé</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Jeffrey</namePart>
            <namePart type="family">Heinz</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Ariadna</namePart>
            <namePart type="family">Quattoni</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Guillaume</namePart>
            <namePart type="family">Rabusseau</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Florence, Italy</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>This work attempts to explain the types of computation that neural networks can perform by relating them to automata. We first define what it means for a real-time network with bounded precision to accept a language. A measure of network memory follows from this definition. We then characterize the classes of languages acceptable by various recurrent networks, attention, and convolutional networks. We find that LSTMs function like counter machines and relate convolutional networks to the subregular hierarchy. Overall, this work attempts to increase our understanding and ability to interpret neural networks through the lens of theory. These theoretical insights help explain neural computation, as well as the relationship between neural networks and natural language grammar.</abstract>
    <identifier type="citekey">merrill-2019-sequential</identifier>
    <identifier type="doi">10.18653/v1/W19-3901</identifier>
    <location>
        <url>https://aclanthology.org/W19-3901</url>
    </location>
    <part>
        <date>2019-08</date>
        <extent unit="page">
            <start>1</start>
            <end>13</end>
        </extent>
    </part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Sequential Neural Networks as Automata
%A Merrill, William
%Y Eisner, Jason
%Y Gallé, Matthias
%Y Heinz, Jeffrey
%Y Quattoni, Ariadna
%Y Rabusseau, Guillaume
%S Proceedings of the Workshop on Deep Learning and Formal Languages: Building Bridges
%D 2019
%8 August
%I Association for Computational Linguistics
%C Florence, Italy
%F merrill-2019-sequential
%X This work attempts to explain the types of computation that neural networks can perform by relating them to automata. We first define what it means for a real-time network with bounded precision to accept a language. A measure of network memory follows from this definition. We then characterize the classes of languages acceptable by various recurrent networks, attention, and convolutional networks. We find that LSTMs function like counter machines and relate convolutional networks to the subregular hierarchy. Overall, this work attempts to increase our understanding and ability to interpret neural networks through the lens of theory. These theoretical insights help explain neural computation, as well as the relationship between neural networks and natural language grammar.
%R 10.18653/v1/W19-3901
%U https://aclanthology.org/W19-3901
%U https://doi.org/10.18653/v1/W19-3901
%P 1-13
Markdown (Informal)

[Sequential Neural Networks as Automata](https://aclanthology.org/W19-3901) (Merrill, ACL 2019)

ACL

William Merrill. 2019. Sequential Neural Networks as Automata. In Proceedings of the Workshop on Deep Learning and Formal Languages: Building Bridges, pages 1–13, Florence, Italy. Association for Computational Linguistics.
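
As an informal aside on the abstract's claim that LSTMs function like counter machines: a counter machine recognizes languages such as aⁿbⁿ by incrementing and decrementing an integer counter and testing it against zero. The sketch below is illustrative only, not code from the paper; the function name `accepts_anbn` is our own.

```python
# A minimal one-counter acceptor for the language a^n b^n, to illustrate
# what "counter machine" means in the abstract above. Illustrative toy,
# not from the paper.

def accepts_anbn(s: str) -> bool:
    """Accept strings of the form a^n b^n (n >= 0) using one counter."""
    count = 0
    seen_b = False
    for ch in s:
        if ch == "a":
            if seen_b:          # an 'a' after any 'b' can never be a^n b^n
                return False
            count += 1          # increment the counter for each 'a'
        elif ch == "b":
            seen_b = True
            count -= 1          # decrement the counter for each 'b'
            if count < 0:       # more b's than a's so far: reject
                return False
        else:
            return False        # alphabet is {a, b}
    return count == 0           # accept iff the counter returns to zero

assert accepts_anbn("aabb")
assert not accepts_anbn("aab")
assert not accepts_anbn("abab")
```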