@inproceedings{oikawa-etal-2022-stacking,
  title     = {A Stacking-based Efficient Method for Toxic Language Detection on Live Streaming Chat},
  author    = {Oikawa, Yuto and
               Nakayama, Yuki and
               Murakami, Koji},
  editor    = {Li, Yunyao and
               Lazaridou, Angeliki},
  booktitle = {Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track},
  month     = dec,
  year      = {2022},
  address   = {Abu Dhabi, UAE},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2022.emnlp-industry.58},
  doi       = {10.18653/v1/2022.emnlp-industry.58},
  pages     = {571--578},
  abstract  = {In a live streaming chat on a video streaming service, it is crucial to filter out toxic comments with online processing to prevent users from reading comments in real-time. However, recent toxic language detection methods rely on deep learning methods, which can not be scalable considering inference speed. Also, these methods do not consider constraints of computational resources expected depending on a deployed system (e.g., no GPU resource). This paper presents an efficient method for toxic language detection that is aware of real-world scenarios. Our proposed architecture is based on partial stacking that feeds initial results with low confidence to meta-classifier. Experimental results show that our method achieves a much faster inference speed than BERT-based models with comparable performance.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="oikawa-etal-2022-stacking">
<titleInfo>
<title>A Stacking-based Efficient Method for Toxic Language Detection on Live Streaming Chat</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yuto</namePart>
<namePart type="family">Oikawa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yuki</namePart>
<namePart type="family">Nakayama</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Koji</namePart>
<namePart type="family">Murakami</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yunyao</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Angeliki</namePart>
<namePart type="family">Lazaridou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, UAE</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In a live streaming chat on a video streaming service, it is crucial to filter out toxic comments with online processing to prevent users from reading comments in real-time. However, recent toxic language detection methods rely on deep learning methods, which can not be scalable considering inference speed. Also, these methods do not consider constraints of computational resources expected depending on a deployed system (e.g., no GPU resource). This paper presents an efficient method for toxic language detection that is aware of real-world scenarios. Our proposed architecture is based on partial stacking that feeds initial results with low confidence to meta-classifier. Experimental results show that our method achieves a much faster inference speed than BERT-based models with comparable performance.</abstract>
<identifier type="citekey">oikawa-etal-2022-stacking</identifier>
<identifier type="doi">10.18653/v1/2022.emnlp-industry.58</identifier>
<location>
<url>https://aclanthology.org/2022.emnlp-industry.58</url>
</location>
<part>
<date>2022-12</date>
<extent unit="page">
<start>571</start>
<end>578</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T A Stacking-based Efficient Method for Toxic Language Detection on Live Streaming Chat
%A Oikawa, Yuto
%A Nakayama, Yuki
%A Murakami, Koji
%Y Li, Yunyao
%Y Lazaridou, Angeliki
%S Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, UAE
%F oikawa-etal-2022-stacking
%X In a live streaming chat on a video streaming service, it is crucial to filter out toxic comments with online processing to prevent users from reading comments in real-time. However, recent toxic language detection methods rely on deep learning methods, which can not be scalable considering inference speed. Also, these methods do not consider constraints of computational resources expected depending on a deployed system (e.g., no GPU resource). This paper presents an efficient method for toxic language detection that is aware of real-world scenarios. Our proposed architecture is based on partial stacking that feeds initial results with low confidence to meta-classifier. Experimental results show that our method achieves a much faster inference speed than BERT-based models with comparable performance.
%R 10.18653/v1/2022.emnlp-industry.58
%U https://aclanthology.org/2022.emnlp-industry.58
%U https://doi.org/10.18653/v1/2022.emnlp-industry.58
%P 571-578
Markdown (Informal)
[A Stacking-based Efficient Method for Toxic Language Detection on Live Streaming Chat](https://aclanthology.org/2022.emnlp-industry.58) (Oikawa et al., EMNLP 2022)
ACL