@inproceedings{jarquin-vasquez-etal-2021-self,
title = "Self-Contextualized Attention for Abusive Language Identification",
author = "Jarqu{\'\i}n-V{\'a}squez, Horacio and
Escalante, Hugo Jair and
Montes, Manuel",
editor = "Ku, Lun-Wei and
Li, Cheng-Te",
booktitle = "Proceedings of the Ninth International Workshop on Natural Language Processing for Social Media",
month = jun,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.socialnlp-1.9",
doi = "10.18653/v1/2021.socialnlp-1.9",
pages = "103--112",
abstract = "The use of attention mechanisms in deep learning approaches has become popular in natural language processing due to its outstanding performance. The use of these mechanisms allows one managing the importance of the elements of a sequence in accordance to their context, however, this importance has been observed independently between the pairs of elements of a sequence (self-attention) and between the application domain of a sequence (contextual attention), leading to the loss of relevant information and limiting the representation of the sequences. To tackle these particular issues we propose the self-contextualized attention mechanism, which trades off the previous limitations, by considering the internal and contextual relationships between the elements of a sequence. The proposed mechanism was evaluated in four standard collections for the abusive language identification task achieving encouraging results. It outperformed the current attention mechanisms and showed a competitive performance with respect to state-of-the-art approaches.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="jarquin-vasquez-etal-2021-self">
<titleInfo>
<title>Self-Contextualized Attention for Abusive Language Identification</title>
</titleInfo>
<name type="personal">
<namePart type="given">Horacio</namePart>
<namePart type="family">Jarquín-Vásquez</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hugo</namePart>
<namePart type="given">Jair</namePart>
<namePart type="family">Escalante</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Manuel</namePart>
<namePart type="family">Montes</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Ninth International Workshop on Natural Language Processing for Social Media</title>
</titleInfo>
<name type="personal">
<namePart type="given">Lun-Wei</namePart>
<namePart type="family">Ku</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Cheng-Te</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The use of attention mechanisms in deep learning approaches has become popular in natural language processing due to their outstanding performance. These mechanisms allow a model to weigh the importance of the elements of a sequence according to their context; however, this importance has so far been modeled independently, either between pairs of elements of a sequence (self-attention) or between a sequence and its application domain (contextual attention), leading to the loss of relevant information and limiting the representation of the sequences. To tackle these issues we propose the self-contextualized attention mechanism, which addresses both limitations by jointly considering the internal and contextual relationships between the elements of a sequence. The proposed mechanism was evaluated on four standard collections for the abusive language identification task, achieving encouraging results: it outperformed current attention mechanisms and showed competitive performance with respect to state-of-the-art approaches.</abstract>
<identifier type="citekey">jarquin-vasquez-etal-2021-self</identifier>
<identifier type="doi">10.18653/v1/2021.socialnlp-1.9</identifier>
<location>
<url>https://aclanthology.org/2021.socialnlp-1.9</url>
</location>
<part>
<date>2021-06</date>
<extent unit="page">
<start>103</start>
<end>112</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Self-Contextualized Attention for Abusive Language Identification
%A Jarquín-Vásquez, Horacio
%A Escalante, Hugo Jair
%A Montes, Manuel
%Y Ku, Lun-Wei
%Y Li, Cheng-Te
%S Proceedings of the Ninth International Workshop on Natural Language Processing for Social Media
%D 2021
%8 June
%I Association for Computational Linguistics
%C Online
%F jarquin-vasquez-etal-2021-self
%X The use of attention mechanisms in deep learning approaches has become popular in natural language processing due to their outstanding performance. These mechanisms allow a model to weigh the importance of the elements of a sequence according to their context; however, this importance has so far been modeled independently, either between pairs of elements of a sequence (self-attention) or between a sequence and its application domain (contextual attention), leading to the loss of relevant information and limiting the representation of the sequences. To tackle these issues we propose the self-contextualized attention mechanism, which addresses both limitations by jointly considering the internal and contextual relationships between the elements of a sequence. The proposed mechanism was evaluated on four standard collections for the abusive language identification task, achieving encouraging results: it outperformed current attention mechanisms and showed competitive performance with respect to state-of-the-art approaches.
%R 10.18653/v1/2021.socialnlp-1.9
%U https://aclanthology.org/2021.socialnlp-1.9
%U https://doi.org/10.18653/v1/2021.socialnlp-1.9
%P 103-112
Markdown (Informal)
[Self-Contextualized Attention for Abusive Language Identification](https://aclanthology.org/2021.socialnlp-1.9) (Jarquín-Vásquez et al., SocialNLP 2021)
ACL
Horacio Jarquín-Vásquez, Hugo Jair Escalante, and Manuel Montes. 2021. Self-Contextualized Attention for Abusive Language Identification. In Proceedings of the Ninth International Workshop on Natural Language Processing for Social Media, pages 103–112, Online. Association for Computational Linguistics.
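
The abstract describes a mechanism that jointly models internal (token-to-token) and contextual (token-to-domain) relationships. The paper's exact formulation is not given here, so the following is only a minimal illustrative sketch of one plausible way to fuse pairwise self-attention scores with a learned context-vector attention score; the class name, layer choices, and the additive score fusion are assumptions, not the authors' architecture.

```python
# Hedged sketch (not the paper's implementation): combine pairwise
# self-attention scores with a per-token score against a learned
# "domain context" vector, then attend with the fused weights.
import torch
import torch.nn as nn
import torch.nn.functional as F


class SelfContextualizedAttention(nn.Module):
    """Illustrative only: fuses token-pair relevance with token-context relevance."""

    def __init__(self, hidden_dim: int):
        super().__init__()
        self.query = nn.Linear(hidden_dim, hidden_dim)
        self.key = nn.Linear(hidden_dim, hidden_dim)
        # Learned context vector, standing in for "contextual attention".
        self.context = nn.Parameter(torch.randn(hidden_dim))
        self.scale = hidden_dim ** 0.5

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, seq_len, hidden_dim)
        q, k = self.query(x), self.key(x)
        # Internal (pairwise) relevance between sequence elements: (B, L, L)
        pair_scores = torch.bmm(q, k.transpose(1, 2)) / self.scale
        # Contextual relevance of each element to the learned context: (B, L)
        ctx_scores = torch.matmul(torch.tanh(k), self.context)
        # Fuse both signals before normalizing (assumed additive fusion).
        scores = pair_scores + ctx_scores.unsqueeze(1)
        weights = F.softmax(scores, dim=-1)
        # Weighted combination of the input elements: (B, L, hidden_dim)
        return torch.bmm(weights, x)


# Usage example with random inputs.
attn = SelfContextualizedAttention(hidden_dim=64)
out = attn(torch.randn(2, 10, 64))
print(out.shape)  # torch.Size([2, 10, 64])
```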