@inproceedings{lamarre-etal-2022-attention,
title = "Attention weights accurately predict language representations in the brain",
author = "Lamarre, Mathis and
Chen, Catherine and
Deniz, Fatma",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.findings-emnlp.330",
doi = "10.18653/v1/2022.findings-emnlp.330",
pages = "4513--4529",
abstract = "In Transformer-based language models (LMs) the attention mechanism converts token embeddings into contextual embeddings that incorporate information from neighboring words. The resulting contextual hidden state embeddings have enabled highly accurate models of brain responses, suggesting that the attention mechanism constructs contextual embeddings that carry information reflected in language-related brain representations. However, it is unclear whether the attention weights that are used to integrate information across words are themselves related to language representations in the brain. To address this question we analyzed functional magnetic resonance imaging (fMRI) recordings of participants reading English language narratives. We provided the narrative text as input to two LMs (BERT and GPT-2) and extracted their corresponding attention weights. We then used encoding models to determine how well attention weights can predict recorded brain responses. We find that attention weights accurately predict brain responses in much of the frontal and temporal cortices. Our results suggest that the attention mechanism itself carries information that is reflected in brain representations. Moreover, these results indicate cortical areas in which context integration may occur.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="lamarre-etal-2022-attention">
    <titleInfo>
      <title>Attention weights accurately predict language representations in the brain</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Mathis</namePart>
      <namePart type="family">Lamarre</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Catherine</namePart>
      <namePart type="family">Chen</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Fatma</namePart>
      <namePart type="family">Deniz</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2022-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: EMNLP 2022</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Yoav</namePart>
        <namePart type="family">Goldberg</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Zornitsa</namePart>
        <namePart type="family">Kozareva</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yue</namePart>
        <namePart type="family">Zhang</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Abu Dhabi, United Arab Emirates</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>In Transformer-based language models (LMs) the attention mechanism converts token embeddings into contextual embeddings that incorporate information from neighboring words. The resulting contextual hidden state embeddings have enabled highly accurate models of brain responses, suggesting that the attention mechanism constructs contextual embeddings that carry information reflected in language-related brain representations. However, it is unclear whether the attention weights that are used to integrate information across words are themselves related to language representations in the brain. To address this question we analyzed functional magnetic resonance imaging (fMRI) recordings of participants reading English language narratives. We provided the narrative text as input to two LMs (BERT and GPT-2) and extracted their corresponding attention weights. We then used encoding models to determine how well attention weights can predict recorded brain responses. We find that attention weights accurately predict brain responses in much of the frontal and temporal cortices. Our results suggest that the attention mechanism itself carries information that is reflected in brain representations. Moreover, these results indicate cortical areas in which context integration may occur.</abstract>
    <identifier type="citekey">lamarre-etal-2022-attention</identifier>
    <identifier type="doi">10.18653/v1/2022.findings-emnlp.330</identifier>
    <location>
      <url>https://aclanthology.org/2022.findings-emnlp.330</url>
    </location>
    <part>
      <date>2022-12</date>
      <extent unit="page">
        <start>4513</start>
        <end>4529</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Attention weights accurately predict language representations in the brain
%A Lamarre, Mathis
%A Chen, Catherine
%A Deniz, Fatma
%Y Goldberg, Yoav
%Y Kozareva, Zornitsa
%Y Zhang, Yue
%S Findings of the Association for Computational Linguistics: EMNLP 2022
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates
%F lamarre-etal-2022-attention
%X In Transformer-based language models (LMs) the attention mechanism converts token embeddings into contextual embeddings that incorporate information from neighboring words. The resulting contextual hidden state embeddings have enabled highly accurate models of brain responses, suggesting that the attention mechanism constructs contextual embeddings that carry information reflected in language-related brain representations. However, it is unclear whether the attention weights that are used to integrate information across words are themselves related to language representations in the brain. To address this question we analyzed functional magnetic resonance imaging (fMRI) recordings of participants reading English language narratives. We provided the narrative text as input to two LMs (BERT and GPT-2) and extracted their corresponding attention weights. We then used encoding models to determine how well attention weights can predict recorded brain responses. We find that attention weights accurately predict brain responses in much of the frontal and temporal cortices. Our results suggest that the attention mechanism itself carries information that is reflected in brain representations. Moreover, these results indicate cortical areas in which context integration may occur.
%R 10.18653/v1/2022.findings-emnlp.330
%U https://aclanthology.org/2022.findings-emnlp.330
%U https://doi.org/10.18653/v1/2022.findings-emnlp.330
%P 4513-4529
Markdown (Informal)
[Attention weights accurately predict language representations in the brain](https://aclanthology.org/2022.findings-emnlp.330) (Lamarre et al., Findings 2022)
ACL
Mathis Lamarre, Catherine Chen, and Fatma Deniz. 2022. Attention weights accurately predict language representations in the brain. In Findings of the Association for Computational Linguistics: EMNLP 2022, pages 4513–4529, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.
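
The abstract above describes a two-step pipeline: extract per-token attention weights from a Transformer LM, then fit an encoding model that predicts brain responses from those weights. The following is a minimal sketch of that idea, not the authors' released code: it assumes HuggingFace transformers and scikit-learn, picks bert-base-uncased, pools attention into an illustrative "attention received per token" feature, and substitutes synthetic responses for real fMRI data.

```python
# Sketch only: extract BERT attention weights for a text and fit a
# ridge-regression encoding model on top of them. The pooling scheme
# and the synthetic "fMRI" targets are assumptions for illustration.
import numpy as np
import torch
from sklearn.linear_model import RidgeCV
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModel.from_pretrained("bert-base-uncased", output_attentions=True)
model.eval()

def attention_features(text: str) -> np.ndarray:
    """One feature vector per token: the attention each token receives,
    averaged over query positions, for every (layer, head) pair."""
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    with torch.no_grad():
        out = model(**inputs)
    # out.attentions: one (1, heads, seq, seq) tensor per layer.
    att = torch.stack(out.attentions).squeeze(1)  # (layers, heads, seq, seq)
    received = att.mean(dim=2)                    # average over query positions
    # -> (seq_len, layers * heads): one row of features per token.
    return received.permute(2, 0, 1).flatten(1).numpy()

text = ("The attention mechanism integrates context across words. "
        "Each head assigns a weight to every other token in the sequence.")
X = attention_features(text)

# Synthetic stand-in for recorded responses (n_tokens x n_voxels); in the
# study these would be BOLD time courses aligned to the narrative text.
rng = np.random.default_rng(0)
W = rng.standard_normal((X.shape[1], 50))
Y = X @ W + 0.1 * rng.standard_normal((X.shape[0], 50))

# Encoding model: ridge regression with cross-validated regularization.
encoder = RidgeCV(alphas=np.logspace(-2, 4, 13)).fit(X, Y)
print("in-sample R^2:", encoder.score(X, Y))
```

In the paper's setting the features would additionally be aligned to the fMRI sampling rate and evaluated on held-out data per voxel; this sketch only shows the attention-to-response regression structure.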