@inproceedings{saphra-lopez-2020-lstms,
    title = "{LSTM}s Compose{---}and {L}earn{---}{B}ottom-Up",
    author = "Saphra, Naomi  and
      Lopez, Adam",
    editor = "Cohn, Trevor  and
      He, Yulan  and
      Liu, Yang",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.findings-emnlp.252",
    doi = "10.18653/v1/2020.findings-emnlp.252",
    pages = "2797--2809",
    abstract = "Recent work in NLP shows that LSTM language models capture compositional structure in language data. In contrast to existing work, we consider the \textit{learning} process that leads to compositional behavior. For a closer look at how an LSTM{'}s sequential representations are composed hierarchically, we present a related measure of Decompositional Interdependence (DI) between word meanings in an LSTM, based on their gate interactions. We support this measure with experiments on English language data, where DI is higher on pairs of words with lower syntactic distance. To explore the inductive biases that cause these compositional representations to arise during training, we conduct simple experiments on synthetic data. These synthetic experiments support a specific hypothesis about how hierarchical structures are discovered over the course of training: that LSTM constituent representations are learned bottom-up, relying on effective representations of their shorter children, rather than on learning the longer-range relations independently.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="saphra-lopez-2020-lstms">
<titleInfo>
<title>LSTMs Compose—and Learn—Bottom-Up</title>
</titleInfo>
<name type="personal">
<namePart type="given">Naomi</namePart>
<namePart type="family">Saphra</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Adam</namePart>
<namePart type="family">Lopez</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2020</title>
</titleInfo>
<name type="personal">
<namePart type="given">Trevor</namePart>
<namePart type="family">Cohn</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yulan</namePart>
<namePart type="family">He</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yang</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Recent work in NLP shows that LSTM language models capture compositional structure in language data. In contrast to existing work, we consider the learning process that leads to compositional behavior. For a closer look at how an LSTM’s sequential representations are composed hierarchically, we present a related measure of Decompositional Interdependence (DI) between word meanings in an LSTM, based on their gate interactions. We support this measure with experiments on English language data, where DI is higher on pairs of words with lower syntactic distance. To explore the inductive biases that cause these compositional representations to arise during training, we conduct simple experiments on synthetic data. These synthetic experiments support a specific hypothesis about how hierarchical structures are discovered over the course of training: that LSTM constituent representations are learned bottom-up, relying on effective representations of their shorter children, rather than on learning the longer-range relations independently.</abstract>
<identifier type="citekey">saphra-lopez-2020-lstms</identifier>
<identifier type="doi">10.18653/v1/2020.findings-emnlp.252</identifier>
<location>
<url>https://aclanthology.org/2020.findings-emnlp.252</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>2797</start>
<end>2809</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T LSTMs Compose—and Learn—Bottom-Up
%A Saphra, Naomi
%A Lopez, Adam
%Y Cohn, Trevor
%Y He, Yulan
%Y Liu, Yang
%S Findings of the Association for Computational Linguistics: EMNLP 2020
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F saphra-lopez-2020-lstms
%X Recent work in NLP shows that LSTM language models capture compositional structure in language data. In contrast to existing work, we consider the learning process that leads to compositional behavior. For a closer look at how an LSTM’s sequential representations are composed hierarchically, we present a related measure of Decompositional Interdependence (DI) between word meanings in an LSTM, based on their gate interactions. We support this measure with experiments on English language data, where DI is higher on pairs of words with lower syntactic distance. To explore the inductive biases that cause these compositional representations to arise during training, we conduct simple experiments on synthetic data. These synthetic experiments support a specific hypothesis about how hierarchical structures are discovered over the course of training: that LSTM constituent representations are learned bottom-up, relying on effective representations of their shorter children, rather than on learning the longer-range relations independently.
%R 10.18653/v1/2020.findings-emnlp.252
%U https://aclanthology.org/2020.findings-emnlp.252
%U https://doi.org/10.18653/v1/2020.findings-emnlp.252
%P 2797-2809
Markdown (Informal)
[LSTMs Compose—and Learn—Bottom-Up](https://aclanthology.org/2020.findings-emnlp.252) (Saphra & Lopez, Findings 2020)
ACL
Naomi Saphra and Adam Lopez. 2020. LSTMs Compose—and Learn—Bottom-Up. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 2797–2809, Online. Association for Computational Linguistics.
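
The abstract's key quantity, Decompositional Interdependence (DI), is easiest to grasp through a toy sketch. The paper derives DI from gate-level contextual decomposition of a trained LSTM; the snippet below is only a hedged illustration of the underlying intuition, not the paper's method. Every name in it (`di_score`, `contribution`, the choice of normalization) is a hypothetical stand-in: two spans are interdependent to the extent that their joint contribution to a hidden state differs from the sum of their separate contributions.

```python
# Toy illustration of the Decompositional Interdependence (DI) intuition.
# NOT the paper's implementation: Saphra & Lopez (2020) derive DI from
# gate-level contextual decomposition of an LSTM. Here `contribution` is a
# hypothetical stand-in that looks up a span's precomputed additive
# contribution to a hidden state, so only the interaction term is shown.
import numpy as np


def contribution(hidden_decomp: dict, span: frozenset) -> np.ndarray:
    """Look up the (precomputed) contribution of a word span to h_t."""
    return hidden_decomp[span]


def di_score(hidden_decomp: dict, a: frozenset, b: frozenset) -> float:
    """Interdependence of spans a and b: how far their joint contribution
    departs from the sum of their separate contributions, normalized."""
    joint = contribution(hidden_decomp, a | b)
    separate = contribution(hidden_decomp, a) + contribution(hidden_decomp, b)
    return float(np.linalg.norm(joint - separate) / np.linalg.norm(joint))


# Made-up decomposition vectors for word 0 ("the") and word 1 ("cat"):
decomp = {
    frozenset({0}): np.array([0.2, -0.1]),
    frozenset({1}): np.array([0.5, 0.4]),
    frozenset({0, 1}): np.array([0.9, 0.6]),  # joint != sum -> interaction
}
print(di_score(decomp, frozenset({0}), frozenset({1})))  # ~0.33, i.e. > 0
```

Under this reading, the abstract's empirical claim is that word pairs at low syntactic distance (for example, a determiner and its noun) yield higher scores than syntactically distant pairs.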