@inproceedings{liza-grzes-2019-relating,
title = "Relating {RNN} Layers with the Spectral {WFA} Ranks in Sequence Modelling",
author = "Liza, Farhana Ferdousi and
Grzes, Marek",
editor = "Eisner, Jason and
Gall{\'e}, Matthias and
Heinz, Jeffrey and
Quattoni, Ariadna and
Rabusseau, Guillaume",
booktitle = "Proceedings of the Workshop on Deep Learning and Formal Languages: Building Bridges",
month = aug,
year = "2019",
address = "Florence",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W19-3903",
doi = "10.18653/v1/W19-3903",
pages = "24--33",
abstract = "We analyse Recurrent Neural Networks (RNNs) to understand the significance of multiple LSTM layers. We argue that the Weighted Finite-state Automata (WFA) trained using a spectral learning algorithm are helpful to analyse RNNs. Our results suggest that multiple LSTM layers in RNNs help learning distributed hidden states, but have a smaller impact on the ability to learn long-term dependencies. The analysis is based on the empirical results, however relevant theory (whenever possible) was discussed to justify and support our conclusions.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="liza-grzes-2019-relating">
<titleInfo>
<title>Relating RNN Layers with the Spectral WFA Ranks in Sequence Modelling</title>
</titleInfo>
<name type="personal">
<namePart type="given">Farhana</namePart>
<namePart type="given">Ferdousi</namePart>
<namePart type="family">Liza</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marek</namePart>
<namePart type="family">Grzes</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2019-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Workshop on Deep Learning and Formal Languages: Building Bridges</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jason</namePart>
<namePart type="family">Eisner</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Matthias</namePart>
<namePart type="family">Gallé</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jeffrey</namePart>
<namePart type="family">Heinz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ariadna</namePart>
<namePart type="family">Quattoni</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Guillaume</namePart>
<namePart type="family">Rabusseau</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Florence</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We analyse Recurrent Neural Networks (RNNs) to understand the significance of multiple LSTM layers. We argue that Weighted Finite-state Automata (WFA) trained using a spectral learning algorithm are helpful for analysing RNNs. Our results suggest that multiple LSTM layers in RNNs help learn distributed hidden states, but have a smaller impact on the ability to learn long-term dependencies. The analysis is based on empirical results; however, relevant theory (whenever possible) is discussed to justify and support our conclusions.</abstract>
<identifier type="citekey">liza-grzes-2019-relating</identifier>
<identifier type="doi">10.18653/v1/W19-3903</identifier>
<location>
<url>https://aclanthology.org/W19-3903</url>
</location>
<part>
<date>2019-08</date>
<extent unit="page">
<start>24</start>
<end>33</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Relating RNN Layers with the Spectral WFA Ranks in Sequence Modelling
%A Liza, Farhana Ferdousi
%A Grzes, Marek
%Y Eisner, Jason
%Y Gallé, Matthias
%Y Heinz, Jeffrey
%Y Quattoni, Ariadna
%Y Rabusseau, Guillaume
%S Proceedings of the Workshop on Deep Learning and Formal Languages: Building Bridges
%D 2019
%8 August
%I Association for Computational Linguistics
%C Florence, Italy
%F liza-grzes-2019-relating
%X We analyse Recurrent Neural Networks (RNNs) to understand the significance of multiple LSTM layers. We argue that Weighted Finite-state Automata (WFA) trained using a spectral learning algorithm are helpful for analysing RNNs. Our results suggest that multiple LSTM layers in RNNs help learn distributed hidden states, but have a smaller impact on the ability to learn long-term dependencies. The analysis is based on empirical results; however, relevant theory (whenever possible) is discussed to justify and support our conclusions.
%R 10.18653/v1/W19-3903
%U https://aclanthology.org/W19-3903
%U https://doi.org/10.18653/v1/W19-3903
%P 24-33
Markdown (Informal)
[Relating RNN Layers with the Spectral WFA Ranks in Sequence Modelling](https://aclanthology.org/W19-3903) (Liza & Grzes, ACL 2019)
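
For readers landing on this citation page, the "spectral WFA rank" in the title refers to the rank of a truncated SVD of a Hankel matrix in the standard spectral learning method for Weighted Finite-state Automata, which the abstract says the authors use to analyse RNNs. Below is a minimal, hypothetical sketch of that spectral step (not the authors' code; all function and variable names, and the toy Hankel inputs, are assumptions for illustration):

```python
import numpy as np

def spectral_wfa(H, H_sigma, h_empty_row, h_empty_col, k):
    """Recover a k-state WFA from Hankel blocks via truncated SVD.

    Hypothetical sketch of the standard spectral method, not the
    paper's implementation.

    H           : |P| x |S| Hankel matrix, H[u, v] = f(uv)
    H_sigma     : dict symbol -> shifted block, H_sigma[a][u, v] = f(u a v)
    h_empty_row : row of H for the empty prefix,  h_empty_row[v] = f(v)
    h_empty_col : column of H for the empty suffix, h_empty_col[u] = f(u)
    k           : truncation rank = number of WFA states (the "spectral WFA rank")
    """
    U, s, Vt = np.linalg.svd(H, full_matrices=False)
    P = U[:, :k] * s[:k]          # forward factor,  shape (|P|, k)
    S = Vt[:k, :]                 # backward factor, shape (k, |S|)
    P_pinv, S_pinv = np.linalg.pinv(P), np.linalg.pinv(S)
    alpha0 = h_empty_row @ S_pinv               # initial weight vector
    alpha_inf = P_pinv @ h_empty_col            # final weight vector
    A = {a: P_pinv @ Ha @ S_pinv for a, Ha in H_sigma.items()}  # per-symbol transitions
    return alpha0, A, alpha_inf

def wfa_score(alpha0, A, alpha_inf, word):
    """Compute f(word) = alpha0 . A[w1] ... A[wn] . alpha_inf."""
    v = alpha0
    for a in word:
        v = v @ A[a]
    return float(v @ alpha_inf)
```

In an analysis like the paper's, the rank k would be read off the singular spectrum of a Hankel matrix built from the sequence model's predictions (e.g. the number of singular values above a tolerance, `k = int(np.sum(s > tol * s[0]))`), and that effective rank compared across models with different numbers of LSTM layers.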