@inproceedings{chakrabarty-etal-2019-pay,
title = "Pay {``}Attention{''} to your Context when Classifying Abusive Language",
author = "Chakrabarty, Tuhin and
Gupta, Kilol and
Muresan, Smaranda",
editor = "Roberts, Sarah T. and
Tetreault, Joel and
Prabhakaran, Vinodkumar and
Waseem, Zeerak",
booktitle = "Proceedings of the Third Workshop on Abusive Language Online",
month = aug,
year = "2019",
address = "Florence, Italy",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W19-3508",
doi = "10.18653/v1/W19-3508",
pages = "70--79",
abstract = "The goal of any social media platform is to facilitate healthy and meaningful interactions among its users. But more often than not, it has been found that it becomes an avenue for wanton attacks. We propose an experimental study that has three aims: 1) to provide us with a deeper understanding of current data sets that focus on different types of abusive language, which are sometimes overlapping (racism, sexism, hate speech, offensive language, and personal attacks); 2) to investigate what type of attention mechanism (contextual vs. self-attention) is better for abusive language detection using deep learning architectures; and 3) to investigate whether stacked architectures provide an advantage over simple architectures for this task.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="chakrabarty-etal-2019-pay">
    <titleInfo>
      <title>Pay “Attention” to your Context when Classifying Abusive Language</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Tuhin</namePart>
      <namePart type="family">Chakrabarty</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Kilol</namePart>
      <namePart type="family">Gupta</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Smaranda</namePart>
      <namePart type="family">Muresan</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2019-08</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Third Workshop on Abusive Language Online</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Sarah</namePart>
        <namePart type="given">T</namePart>
        <namePart type="family">Roberts</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Joel</namePart>
        <namePart type="family">Tetreault</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Vinodkumar</namePart>
        <namePart type="family">Prabhakaran</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Zeerak</namePart>
        <namePart type="family">Waseem</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Florence, Italy</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>The goal of any social media platform is to facilitate healthy and meaningful interactions among its users. But more often than not, it has been found that it becomes an avenue for wanton attacks. We propose an experimental study that has three aims: 1) to provide us with a deeper understanding of current data sets that focus on different types of abusive language, which are sometimes overlapping (racism, sexism, hate speech, offensive language, and personal attacks); 2) to investigate what type of attention mechanism (contextual vs. self-attention) is better for abusive language detection using deep learning architectures; and 3) to investigate whether stacked architectures provide an advantage over simple architectures for this task.</abstract>
    <identifier type="citekey">chakrabarty-etal-2019-pay</identifier>
    <identifier type="doi">10.18653/v1/W19-3508</identifier>
    <location>
      <url>https://aclanthology.org/W19-3508</url>
    </location>
    <part>
      <date>2019-08</date>
      <extent unit="page">
        <start>70</start>
        <end>79</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Pay “Attention” to your Context when Classifying Abusive Language
%A Chakrabarty, Tuhin
%A Gupta, Kilol
%A Muresan, Smaranda
%Y Roberts, Sarah T.
%Y Tetreault, Joel
%Y Prabhakaran, Vinodkumar
%Y Waseem, Zeerak
%S Proceedings of the Third Workshop on Abusive Language Online
%D 2019
%8 August
%I Association for Computational Linguistics
%C Florence, Italy
%F chakrabarty-etal-2019-pay
%X The goal of any social media platform is to facilitate healthy and meaningful interactions among its users. But more often than not, it has been found that it becomes an avenue for wanton attacks. We propose an experimental study that has three aims: 1) to provide us with a deeper understanding of current data sets that focus on different types of abusive language, which are sometimes overlapping (racism, sexism, hate speech, offensive language, and personal attacks); 2) to investigate what type of attention mechanism (contextual vs. self-attention) is better for abusive language detection using deep learning architectures; and 3) to investigate whether stacked architectures provide an advantage over simple architectures for this task.
%R 10.18653/v1/W19-3508
%U https://aclanthology.org/W19-3508
%U https://doi.org/10.18653/v1/W19-3508
%P 70-79
Markdown (Informal)
[Pay “Attention” to your Context when Classifying Abusive Language](https://aclanthology.org/W19-3508) (Chakrabarty et al., ALW 2019)
ACL
Tuhin Chakrabarty, Kilol Gupta, and Smaranda Muresan. 2019. [Pay “Attention” to your Context when Classifying Abusive Language](https://aclanthology.org/W19-3508). In *Proceedings of the Third Workshop on Abusive Language Online*, pages 70–79, Florence, Italy. Association for Computational Linguistics.