@inproceedings{upadhyay-etal-2022-towards,
title = "Towards Toxic Positivity Detection",
author = "Upadhyay, Ishan Sanjeev and
Srivatsa, KV Aditya and
Mamidi, Radhika",
editor = "Ku, Lun-Wei and
Li, Cheng-Te and
Tsai, Yu-Che and
Wang, Wei-Yao",
booktitle = "Proceedings of the Tenth International Workshop on Natural Language Processing for Social Media",
month = jul,
year = "2022",
address = "Seattle, Washington",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.socialnlp-1.7",
doi = "10.18653/v1/2022.socialnlp-1.7",
pages = "75--82",
abstract = "Over the past few years, there has been a growing concern around toxic positivity on social media, which is a phenomenon where positivity is used to minimize one{'}s emotional experience. In this paper, we create a dataset for toxic positivity classification from Twitter and an inspirational quote website. We then perform benchmarking experiments using various text classification models and show the suitability of these models for the task. We achieved a macro F1 score of 0.71 and a weighted F1 score of 0.85 by using an ensemble model. To the best of our knowledge, our dataset is the first such dataset created.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="upadhyay-etal-2022-towards">
<titleInfo>
<title>Towards Toxic Positivity Detection</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ishan</namePart>
<namePart type="given">Sanjeev</namePart>
<namePart type="family">Upadhyay</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">KV</namePart>
<namePart type="given">Aditya</namePart>
<namePart type="family">Srivatsa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Radhika</namePart>
<namePart type="family">Mamidi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Tenth International Workshop on Natural Language Processing for Social Media</title>
</titleInfo>
<name type="personal">
<namePart type="given">Lun-Wei</namePart>
<namePart type="family">Ku</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Cheng-Te</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yu-Che</namePart>
<namePart type="family">Tsai</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wei-Yao</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Seattle, Washington</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Over the past few years, there has been a growing concern around toxic positivity on social media, which is a phenomenon where positivity is used to minimize one’s emotional experience. In this paper, we create a dataset for toxic positivity classification from Twitter and an inspirational quote website. We then perform benchmarking experiments using various text classification models and show the suitability of these models for the task. We achieved a macro F1 score of 0.71 and a weighted F1 score of 0.85 by using an ensemble model. To the best of our knowledge, our dataset is the first such dataset created.</abstract>
<identifier type="citekey">upadhyay-etal-2022-towards</identifier>
<identifier type="doi">10.18653/v1/2022.socialnlp-1.7</identifier>
<location>
<url>https://aclanthology.org/2022.socialnlp-1.7</url>
</location>
<part>
<date>2022-07</date>
<extent unit="page">
<start>75</start>
<end>82</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Towards Toxic Positivity Detection
%A Upadhyay, Ishan Sanjeev
%A Srivatsa, KV Aditya
%A Mamidi, Radhika
%Y Ku, Lun-Wei
%Y Li, Cheng-Te
%Y Tsai, Yu-Che
%Y Wang, Wei-Yao
%S Proceedings of the Tenth International Workshop on Natural Language Processing for Social Media
%D 2022
%8 July
%I Association for Computational Linguistics
%C Seattle, Washington
%F upadhyay-etal-2022-towards
%X Over the past few years, there has been a growing concern around toxic positivity on social media, which is a phenomenon where positivity is used to minimize one’s emotional experience. In this paper, we create a dataset for toxic positivity classification from Twitter and an inspirational quote website. We then perform benchmarking experiments using various text classification models and show the suitability of these models for the task. We achieved a macro F1 score of 0.71 and a weighted F1 score of 0.85 by using an ensemble model. To the best of our knowledge, our dataset is the first such dataset created.
%R 10.18653/v1/2022.socialnlp-1.7
%U https://aclanthology.org/2022.socialnlp-1.7
%U https://doi.org/10.18653/v1/2022.socialnlp-1.7
%P 75-82
Markdown (Informal)
[Towards Toxic Positivity Detection](https://aclanthology.org/2022.socialnlp-1.7) (Upadhyay et al., SocialNLP 2022)
ACL
- Ishan Sanjeev Upadhyay, KV Aditya Srivatsa, and Radhika Mamidi. 2022. Towards Toxic Positivity Detection. In Proceedings of the Tenth International Workshop on Natural Language Processing for Social Media, pages 75–82, Seattle, Washington. Association for Computational Linguistics.