BibTeX
@inproceedings{matsumoto-etal-2022-tracing,
title = "Tracing and Manipulating intermediate values in Neural Math Problem Solvers",
author = "Matsumoto, Yuta and
Heinzerling, Benjamin and
Yoshikawa, Masashi and
Inui, Kentaro",
editor = "Ferreira, Deborah and
Valentino, Marco and
Freitas, Andre and
Welleck, Sean and
Schubotz, Moritz",
booktitle = "Proceedings of the 1st Workshop on Mathematical Natural Language Processing (MathNLP)",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.mathnlp-1.1",
doi = "10.18653/v1/2022.mathnlp-1.1",
pages = "1--6",
abstract = "How language models process complex input that requires multiple steps of inference is not well understood. Previous research has shown that information about intermediate values of these inputs can be extracted from the activations of the models, but it is unclear where that information is encoded and whether that information is indeed used during inference. We introduce a method for analyzing how a Transformer model processes these inputs by focusing on simple arithmetic problems and their intermediate values. To trace where information about intermediate values is encoded, we measure the correlation between intermediate values and the activations of the model using principal component analysis (PCA). Then, we perform a causal intervention by manipulating model weights. This intervention shows that the weights identified via tracing are not merely correlated with intermediate values, but causally related to model predictions. Our findings show that the model has a locality to certain intermediate values, and this is useful for enhancing the interpretability of the models.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="matsumoto-etal-2022-tracing">
    <titleInfo>
        <title>Tracing and Manipulating intermediate values in Neural Math Problem Solvers</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Yuta</namePart>
        <namePart type="family">Matsumoto</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Benjamin</namePart>
        <namePart type="family">Heinzerling</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Masashi</namePart>
        <namePart type="family">Yoshikawa</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Kentaro</namePart>
        <namePart type="family">Inui</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2022-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the 1st Workshop on Mathematical Natural Language Processing (MathNLP)</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Deborah</namePart>
            <namePart type="family">Ferreira</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Marco</namePart>
            <namePart type="family">Valentino</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Andre</namePart>
            <namePart type="family">Freitas</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Sean</namePart>
            <namePart type="family">Welleck</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Moritz</namePart>
            <namePart type="family">Schubotz</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Abu Dhabi, United Arab Emirates (Hybrid)</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>How language models process complex input that requires multiple steps of inference is not well understood. Previous research has shown that information about intermediate values of these inputs can be extracted from the activations of the models, but it is unclear where that information is encoded and whether that information is indeed used during inference. We introduce a method for analyzing how a Transformer model processes these inputs by focusing on simple arithmetic problems and their intermediate values. To trace where information about intermediate values is encoded, we measure the correlation between intermediate values and the activations of the model using principal component analysis (PCA). Then, we perform a causal intervention by manipulating model weights. This intervention shows that the weights identified via tracing are not merely correlated with intermediate values, but causally related to model predictions. Our findings show that the model has a locality to certain intermediate values, and this is useful for enhancing the interpretability of the models.</abstract>
    <identifier type="citekey">matsumoto-etal-2022-tracing</identifier>
    <identifier type="doi">10.18653/v1/2022.mathnlp-1.1</identifier>
    <location>
        <url>https://aclanthology.org/2022.mathnlp-1.1</url>
    </location>
    <part>
        <date>2022-12</date>
        <extent unit="page">
            <start>1</start>
            <end>6</end>
        </extent>
    </part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Tracing and Manipulating intermediate values in Neural Math Problem Solvers
%A Matsumoto, Yuta
%A Heinzerling, Benjamin
%A Yoshikawa, Masashi
%A Inui, Kentaro
%Y Ferreira, Deborah
%Y Valentino, Marco
%Y Freitas, Andre
%Y Welleck, Sean
%Y Schubotz, Moritz
%S Proceedings of the 1st Workshop on Mathematical Natural Language Processing (MathNLP)
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates (Hybrid)
%F matsumoto-etal-2022-tracing
%X How language models process complex input that requires multiple steps of inference is not well understood. Previous research has shown that information about intermediate values of these inputs can be extracted from the activations of the models, but it is unclear where that information is encoded and whether that information is indeed used during inference. We introduce a method for analyzing how a Transformer model processes these inputs by focusing on simple arithmetic problems and their intermediate values. To trace where information about intermediate values is encoded, we measure the correlation between intermediate values and the activations of the model using principal component analysis (PCA). Then, we perform a causal intervention by manipulating model weights. This intervention shows that the weights identified via tracing are not merely correlated with intermediate values, but causally related to model predictions. Our findings show that the model has a locality to certain intermediate values, and this is useful for enhancing the interpretability of the models.
%R 10.18653/v1/2022.mathnlp-1.1
%U https://aclanthology.org/2022.mathnlp-1.1
%U https://doi.org/10.18653/v1/2022.mathnlp-1.1
%P 1-6
Markdown (Informal)
[Tracing and Manipulating intermediate values in Neural Math Problem Solvers](https://aclanthology.org/2022.mathnlp-1.1) (Matsumoto et al., MathNLP 2022)
ACL
Yuta Matsumoto, Benjamin Heinzerling, Masashi Yoshikawa, and Kentaro Inui. 2022. Tracing and Manipulating intermediate values in Neural Math Problem Solvers. In Proceedings of the 1st Workshop on Mathematical Natural Language Processing (MathNLP), pages 1–6, Abu Dhabi, United Arab Emirates (Hybrid). Association for Computational Linguistics.