@inproceedings{hashemzadeh-etal-2020-language,
  title     = {From Language to Language-ish: How Brain-Like is an {LSTM}{'}s Representation of Nonsensical Language Stimuli?},
  author    = {Hashemzadeh, Maryam and
               Kaufeld, Greta and
               White, Martha and
               Martin, Andrea E. and
               Fyshe, Alona},
  editor    = {Cohn, Trevor and
               He, Yulan and
               Liu, Yang},
  booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2020},
  month     = nov,
  year      = {2020},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2020.findings-emnlp.57},
  doi       = {10.18653/v1/2020.findings-emnlp.57},
  pages     = {645--656},
  abstract  = {The representations generated by many models of language (word embeddings, recurrent neural networks and transformers) correlate to brain activity recorded while people read. However, these decoding results are usually based on the brain{'}s reaction to syntactically and semantically sound language stimuli. In this study, we asked: how does an LSTM (long short term memory) language model, trained (by and large) on semantically and syntactically intact language, represent a language sample with degraded semantic or syntactic information? Does the LSTM representation still resemble the brain{'}s reaction? We found that, even for some kinds of nonsensical language, there is a statistically significant relationship between the brain{'}s activity and the representations of an LSTM. This indicates that, at least in some instances, LSTMs and the human brain handle nonsensical data similarly.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="hashemzadeh-etal-2020-language">
<titleInfo>
<title>From Language to Language-ish: How Brain-Like is an LSTM’s Representation of Nonsensical Language Stimuli?</title>
</titleInfo>
<name type="personal">
<namePart type="given">Maryam</namePart>
<namePart type="family">Hashemzadeh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Greta</namePart>
<namePart type="family">Kaufeld</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Martha</namePart>
<namePart type="family">White</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Andrea</namePart>
<namePart type="given">E</namePart>
<namePart type="family">Martin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alona</namePart>
<namePart type="family">Fyshe</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2020</title>
</titleInfo>
<name type="personal">
<namePart type="given">Trevor</namePart>
<namePart type="family">Cohn</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yulan</namePart>
<namePart type="family">He</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yang</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The representations generated by many models of language (word embeddings, recurrent neural networks and transformers) correlate to brain activity recorded while people read. However, these decoding results are usually based on the brain’s reaction to syntactically and semantically sound language stimuli. In this study, we asked: how does an LSTM (long short term memory) language model, trained (by and large) on semantically and syntactically intact language, represent a language sample with degraded semantic or syntactic information? Does the LSTM representation still resemble the brain’s reaction? We found that, even for some kinds of nonsensical language, there is a statistically significant relationship between the brain’s activity and the representations of an LSTM. This indicates that, at least in some instances, LSTMs and the human brain handle nonsensical data similarly.</abstract>
<identifier type="citekey">hashemzadeh-etal-2020-language</identifier>
<identifier type="doi">10.18653/v1/2020.findings-emnlp.57</identifier>
<location>
<url>https://aclanthology.org/2020.findings-emnlp.57</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>645</start>
<end>656</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T From Language to Language-ish: How Brain-Like is an LSTM’s Representation of Nonsensical Language Stimuli?
%A Hashemzadeh, Maryam
%A Kaufeld, Greta
%A White, Martha
%A Martin, Andrea E.
%A Fyshe, Alona
%Y Cohn, Trevor
%Y He, Yulan
%Y Liu, Yang
%S Findings of the Association for Computational Linguistics: EMNLP 2020
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F hashemzadeh-etal-2020-language
%X The representations generated by many models of language (word embeddings, recurrent neural networks and transformers) correlate to brain activity recorded while people read. However, these decoding results are usually based on the brain’s reaction to syntactically and semantically sound language stimuli. In this study, we asked: how does an LSTM (long short term memory) language model, trained (by and large) on semantically and syntactically intact language, represent a language sample with degraded semantic or syntactic information? Does the LSTM representation still resemble the brain’s reaction? We found that, even for some kinds of nonsensical language, there is a statistically significant relationship between the brain’s activity and the representations of an LSTM. This indicates that, at least in some instances, LSTMs and the human brain handle nonsensical data similarly.
%R 10.18653/v1/2020.findings-emnlp.57
%U https://aclanthology.org/2020.findings-emnlp.57
%U https://doi.org/10.18653/v1/2020.findings-emnlp.57
%P 645-656
Markdown (Informal)
[From Language to Language-ish: How Brain-Like is an LSTM’s Representation of Nonsensical Language Stimuli?](https://aclanthology.org/2020.findings-emnlp.57) (Hashemzadeh et al., Findings 2020)
ACL