@inproceedings{bose-etal-2023-detoxifying,
title = "Detoxifying Online Discourse: A Guided Response Generation Approach for Reducing Toxicity in User-Generated Text",
author = "Bose, Ritwik and
Perera, Ian and
Dorr, Bonnie",
editor = "Chawla, Kushal and
Shi, Weiyan",
booktitle = "Proceedings of the First Workshop on Social Influence in Conversations (SICon 2023)",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.sicon-1.2",
doi = "10.18653/v1/2023.sicon-1.2",
pages = "9--14",
abstract = "The expression of opinions, stances, and moral foundations on social media often coincide with toxic, divisive, or inflammatory language that can make constructive discourse across communities difficult. Natural language generation methods could provide a means to reframe or reword such expressions in a way that fosters more civil discourse, yet current Large Language Model (LLM) methods tend towards language that is too generic or formal to seem authentic for social media discussions. We present preliminary work on training LLMs to maintain authenticity while presenting a community{'}s ideas and values in a constructive, non-toxic manner.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="bose-etal-2023-detoxifying">
<titleInfo>
<title>Detoxifying Online Discourse: A Guided Response Generation Approach for Reducing Toxicity in User-Generated Text</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ritwik</namePart>
<namePart type="family">Bose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ian</namePart>
<namePart type="family">Perera</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bonnie</namePart>
<namePart type="family">Dorr</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First Workshop on Social Influence in Conversations (SICon 2023)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kushal</namePart>
<namePart type="family">Chawla</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Weiyan</namePart>
<namePart type="family">Shi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Toronto, Canada</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
    <abstract>The expression of opinions, stances, and moral foundations on social media often coincides with toxic, divisive, or inflammatory language that can make constructive discourse across communities difficult. Natural language generation methods could provide a means to reframe or reword such expressions in a way that fosters more civil discourse, yet current Large Language Model (LLM) methods tend towards language that is too generic or formal to seem authentic for social media discussions. We present preliminary work on training LLMs to maintain authenticity while presenting a community’s ideas and values in a constructive, non-toxic manner.</abstract>
<identifier type="citekey">bose-etal-2023-detoxifying</identifier>
<identifier type="doi">10.18653/v1/2023.sicon-1.2</identifier>
<location>
<url>https://aclanthology.org/2023.sicon-1.2</url>
</location>
<part>
<date>2023-07</date>
<extent unit="page">
<start>9</start>
<end>14</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Detoxifying Online Discourse: A Guided Response Generation Approach for Reducing Toxicity in User-Generated Text
%A Bose, Ritwik
%A Perera, Ian
%A Dorr, Bonnie
%Y Chawla, Kushal
%Y Shi, Weiyan
%S Proceedings of the First Workshop on Social Influence in Conversations (SICon 2023)
%D 2023
%8 July
%I Association for Computational Linguistics
%C Toronto, Canada
%F bose-etal-2023-detoxifying
%X The expression of opinions, stances, and moral foundations on social media often coincides with toxic, divisive, or inflammatory language that can make constructive discourse across communities difficult. Natural language generation methods could provide a means to reframe or reword such expressions in a way that fosters more civil discourse, yet current Large Language Model (LLM) methods tend towards language that is too generic or formal to seem authentic for social media discussions. We present preliminary work on training LLMs to maintain authenticity while presenting a community’s ideas and values in a constructive, non-toxic manner.
%R 10.18653/v1/2023.sicon-1.2
%U https://aclanthology.org/2023.sicon-1.2
%U https://doi.org/10.18653/v1/2023.sicon-1.2
%P 9-14
Markdown (Informal)
[Detoxifying Online Discourse: A Guided Response Generation Approach for Reducing Toxicity in User-Generated Text](https://aclanthology.org/2023.sicon-1.2) (Bose et al., SICon 2023)
ACL
Ritwik Bose, Ian Perera, and Bonnie Dorr. 2023. Detoxifying Online Discourse: A Guided Response Generation Approach for Reducing Toxicity in User-Generated Text. In Proceedings of the First Workshop on Social Influence in Conversations (SICon 2023), pages 9–14, Toronto, Canada. Association for Computational Linguistics.