BibTeX
@inproceedings{yang-etal-2023-towards-detecting,
    title = "Towards Detecting Contextual Real-Time Toxicity for In-Game Chat",
    author = "Yang, Zachary and
      Grenon-Godbout, Nicolas and
      Rabbany, Reihaneh",
    editor = "Bouamor, Houda and
      Pino, Juan and
      Bali, Kalika",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
    month = dec,
    year = "2023",
    address = "Singapore",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.findings-emnlp.663",
    doi = "10.18653/v1/2023.findings-emnlp.663",
    pages = "9894--9906",
    abstract = "Real-time toxicity detection in online environments poses a significant challenge, due to the increasing prevalence of social media and gaming platforms. We introduce ToxBuster, a simple and scalable model that reliably detects toxic content in real-time for a line of chat by including chat history and metadata. ToxBuster consistently outperforms conventional toxicity models across popular multiplayer games, including Rainbow Six Siege, For Honor, and DOTA 2. We conduct an ablation study to assess the importance of each model component and explore ToxBuster{'}s transferability across the datasets. Furthermore, we showcase ToxBuster{'}s efficacy in post-game moderation, successfully flagging 82.1{\%} of chat-reported players at a precision level of 90.0{\%}. Additionally, we show how an additional 6{\%} of unreported toxic players can be proactively moderated.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="yang-etal-2023-towards-detecting">
    <titleInfo>
      <title>Towards Detecting Contextual Real-Time Toxicity for In-Game Chat</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Zachary</namePart>
      <namePart type="family">Yang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Nicolas</namePart>
      <namePart type="family">Grenon-Godbout</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Reihaneh</namePart>
      <namePart type="family">Rabbany</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2023-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: EMNLP 2023</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Houda</namePart>
        <namePart type="family">Bouamor</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Juan</namePart>
        <namePart type="family">Pino</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Kalika</namePart>
        <namePart type="family">Bali</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Singapore</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Real-time toxicity detection in online environments poses a significant challenge, due to the increasing prevalence of social media and gaming platforms. We introduce ToxBuster, a simple and scalable model that reliably detects toxic content in real-time for a line of chat by including chat history and metadata. ToxBuster consistently outperforms conventional toxicity models across popular multiplayer games, including Rainbow Six Siege, For Honor, and DOTA 2. We conduct an ablation study to assess the importance of each model component and explore ToxBuster’s transferability across the datasets. Furthermore, we showcase ToxBuster’s efficacy in post-game moderation, successfully flagging 82.1% of chat-reported players at a precision level of 90.0%. Additionally, we show how an additional 6% of unreported toxic players can be proactively moderated.</abstract>
    <identifier type="citekey">yang-etal-2023-towards-detecting</identifier>
    <identifier type="doi">10.18653/v1/2023.findings-emnlp.663</identifier>
    <location>
      <url>https://aclanthology.org/2023.findings-emnlp.663</url>
    </location>
    <part>
      <date>2023-12</date>
      <extent unit="page">
        <start>9894</start>
        <end>9906</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Towards Detecting Contextual Real-Time Toxicity for In-Game Chat
%A Yang, Zachary
%A Grenon-Godbout, Nicolas
%A Rabbany, Reihaneh
%Y Bouamor, Houda
%Y Pino, Juan
%Y Bali, Kalika
%S Findings of the Association for Computational Linguistics: EMNLP 2023
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F yang-etal-2023-towards-detecting
%X Real-time toxicity detection in online environments poses a significant challenge, due to the increasing prevalence of social media and gaming platforms. We introduce ToxBuster, a simple and scalable model that reliably detects toxic content in real-time for a line of chat by including chat history and metadata. ToxBuster consistently outperforms conventional toxicity models across popular multiplayer games, including Rainbow Six Siege, For Honor, and DOTA 2. We conduct an ablation study to assess the importance of each model component and explore ToxBuster’s transferability across the datasets. Furthermore, we showcase ToxBuster’s efficacy in post-game moderation, successfully flagging 82.1% of chat-reported players at a precision level of 90.0%. Additionally, we show how an additional 6% of unreported toxic players can be proactively moderated.
%R 10.18653/v1/2023.findings-emnlp.663
%U https://aclanthology.org/2023.findings-emnlp.663
%U https://doi.org/10.18653/v1/2023.findings-emnlp.663
%P 9894-9906
Markdown (Informal)
[Towards Detecting Contextual Real-Time Toxicity for In-Game Chat](https://aclanthology.org/2023.findings-emnlp.663) (Yang et al., Findings 2023)
ACL
Zachary Yang, Nicolas Grenon-Godbout, and Reihaneh Rabbany. 2023. Towards Detecting Contextual Real-Time Toxicity for In-Game Chat. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 9894–9906, Singapore. Association for Computational Linguistics.