@inproceedings{sun-lu-2022-implicit,
title = "Implicit n-grams Induced by Recurrence",
author = "Sun, Xiaobing and
Lu, Wei",
editor = "Carpuat, Marine and
de Marneffe, Marie-Catherine and
Meza Ruiz, Ivan Vladimir",
booktitle = "Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
month = jul,
year = "2022",
address = "Seattle, United States",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.naacl-main.117",
doi = "10.18653/v1/2022.naacl-main.117",
pages = "1624--1639",
abstract = "Although self-attention based models such as Transformers have achieved remarkable successes on natural language processing (NLP)tasks, recent studies reveal that they have limitations on modeling sequential transformations (Hahn, 2020), which may promptre-examinations of recurrent neural networks (RNNs) that demonstrated impressive results on handling sequential data. Despite manyprior attempts to interpret RNNs, their internal mechanisms have not been fully understood, and the question on how exactly they capturesequential features remains largely unclear. In this work, we present a study that shows there actually exist some explainable componentsthat reside within the hidden states, which are reminiscent of the classical n-grams features. We evaluated such extracted explainable features from trained RNNs on downstream sentiment analysis tasks and found they could be used to model interesting linguistic phenomena such as negation and intensification. Furthermore, we examined the efficacy of using such n-gram components alone as encoders on tasks such as sentiment analysis and language modeling, revealing they could be playing important roles in contributing to the overall performance of RNNs. We hope our findings could add interpretability to RNN architectures, and also provide inspirations for proposing new architectures for sequential data.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="sun-lu-2022-implicit">
<titleInfo>
<title>Implicit n-grams Induced by Recurrence</title>
</titleInfo>
<name type="personal">
<namePart type="given">Xiaobing</namePart>
<namePart type="family">Sun</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wei</namePart>
<namePart type="family">Lu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies</title>
</titleInfo>
<name type="personal">
<namePart type="given">Marine</namePart>
<namePart type="family">Carpuat</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marie-Catherine</namePart>
<namePart type="family">de Marneffe</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ivan</namePart>
<namePart type="given">Vladimir</namePart>
<namePart type="family">Meza Ruiz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Seattle, United States</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Although self-attention based models such as Transformers have achieved remarkable successes on natural language processing (NLP) tasks, recent studies reveal that they have limitations on modeling sequential transformations (Hahn, 2020), which may prompt re-examinations of recurrent neural networks (RNNs) that demonstrated impressive results on handling sequential data. Despite many prior attempts to interpret RNNs, their internal mechanisms have not been fully understood, and the question on how exactly they capture sequential features remains largely unclear. In this work, we present a study that shows there actually exist some explainable components that reside within the hidden states, which are reminiscent of the classical n-gram features. We evaluated such extracted explainable features from trained RNNs on downstream sentiment analysis tasks and found they could be used to model interesting linguistic phenomena such as negation and intensification. Furthermore, we examined the efficacy of using such n-gram components alone as encoders on tasks such as sentiment analysis and language modeling, revealing they could be playing important roles in contributing to the overall performance of RNNs. We hope our findings could add interpretability to RNN architectures, and also provide inspirations for proposing new architectures for sequential data.</abstract>
<identifier type="citekey">sun-lu-2022-implicit</identifier>
<identifier type="doi">10.18653/v1/2022.naacl-main.117</identifier>
<location>
<url>https://aclanthology.org/2022.naacl-main.117</url>
</location>
<part>
<date>2022-07</date>
<extent unit="page">
<start>1624</start>
<end>1639</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Implicit n-grams Induced by Recurrence
%A Sun, Xiaobing
%A Lu, Wei
%Y Carpuat, Marine
%Y de Marneffe, Marie-Catherine
%Y Meza Ruiz, Ivan Vladimir
%S Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies
%D 2022
%8 July
%I Association for Computational Linguistics
%C Seattle, United States
%F sun-lu-2022-implicit
%X Although self-attention based models such as Transformers have achieved remarkable successes on natural language processing (NLP) tasks, recent studies reveal that they have limitations on modeling sequential transformations (Hahn, 2020), which may prompt re-examinations of recurrent neural networks (RNNs) that demonstrated impressive results on handling sequential data. Despite many prior attempts to interpret RNNs, their internal mechanisms have not been fully understood, and the question on how exactly they capture sequential features remains largely unclear. In this work, we present a study that shows there actually exist some explainable components that reside within the hidden states, which are reminiscent of the classical n-gram features. We evaluated such extracted explainable features from trained RNNs on downstream sentiment analysis tasks and found they could be used to model interesting linguistic phenomena such as negation and intensification. Furthermore, we examined the efficacy of using such n-gram components alone as encoders on tasks such as sentiment analysis and language modeling, revealing they could be playing important roles in contributing to the overall performance of RNNs. We hope our findings could add interpretability to RNN architectures, and also provide inspirations for proposing new architectures for sequential data.
%R 10.18653/v1/2022.naacl-main.117
%U https://aclanthology.org/2022.naacl-main.117
%U https://doi.org/10.18653/v1/2022.naacl-main.117
%P 1624-1639
Markdown (Informal)
[Implicit n-grams Induced by Recurrence](https://aclanthology.org/2022.naacl-main.117) (Sun & Lu, NAACL 2022)
ACL
Xiaobing Sun and Wei Lu. 2022. Implicit n-grams Induced by Recurrence. In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1624–1639, Seattle, United States. Association for Computational Linguistics.