@inproceedings{brun-nikoulina-2024-frenchtoxicityprompts,
title = "{F}rench{T}oxicity{P}rompts: a Large Benchmark for Evaluating and Mitigating Toxicity in {F}rench Texts",
author = "Brun, Caroline and
Nikoulina, Vassilina",
editor = "Kumar, Ritesh and
Ojha, Atul Kr. and
Malmasi, Shervin and
Chakravarthi, Bharathi Raja and
Lahiri, Bornini and
Singh, Siddharth and
Ratan, Shyam",
booktitle = "Proceedings of the Fourth Workshop on Threat, Aggression {\&} Cyberbullying @ LREC-COLING-2024",
month = may,
year = "2024",
address = "Torino, Italia",
publisher = "ELRA and ICCL",
url = "https://aclanthology.org/2024.trac-1.12",
pages = "105--114",
abstract = "Large language models (LLMs) are increasingly popular but are also prone to generating biased, toxic or harmful language, which can have detrimental effects on individuals and communities. Although most effort is put to assess and mitigate toxicity in generated content, it is primarily concentrated on English, while it{'}s essential to consider other languages as well. To address this issue, we create and release FrenchToxicityPrompts, a dataset of 50K naturally occurring French prompts and their continuations, annotated with toxicity scores from a widely used toxicity classifier. We evaluate 14 different models from four prevalent open-sourced families of LLMs against our dataset to assess their potential toxicity across various dimensions. We hope that our contribution will foster future research on toxicity detection and mitigation beyond English.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="brun-nikoulina-2024-frenchtoxicityprompts">
<titleInfo>
<title>FrenchToxicityPrompts: a Large Benchmark for Evaluating and Mitigating Toxicity in French Texts</title>
</titleInfo>
<name type="personal">
<namePart type="given">Caroline</namePart>
<namePart type="family">Brun</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vassilina</namePart>
<namePart type="family">Nikoulina</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Fourth Workshop on Threat, Aggression &amp; Cyberbullying @ LREC-COLING-2024</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ritesh</namePart>
<namePart type="family">Kumar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Atul</namePart>
<namePart type="given">Kr.</namePart>
<namePart type="family">Ojha</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shervin</namePart>
<namePart type="family">Malmasi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bharathi</namePart>
<namePart type="given">Raja</namePart>
<namePart type="family">Chakravarthi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bornini</namePart>
<namePart type="family">Lahiri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Siddharth</namePart>
<namePart type="family">Singh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shyam</namePart>
<namePart type="family">Ratan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>ELRA and ICCL</publisher>
<place>
<placeTerm type="text">Torino, Italia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Large language models (LLMs) are increasingly popular but are also prone to generating biased, toxic or harmful language, which can have detrimental effects on individuals and communities. Although most effort is put to assess and mitigate toxicity in generated content, it is primarily concentrated on English, while it’s essential to consider other languages as well. To address this issue, we create and release FrenchToxicityPrompts, a dataset of 50K naturally occurring French prompts and their continuations, annotated with toxicity scores from a widely used toxicity classifier. We evaluate 14 different models from four prevalent open-sourced families of LLMs against our dataset to assess their potential toxicity across various dimensions. We hope that our contribution will foster future research on toxicity detection and mitigation beyond English.</abstract>
<identifier type="citekey">brun-nikoulina-2024-frenchtoxicityprompts</identifier>
<location>
<url>https://aclanthology.org/2024.trac-1.12</url>
</location>
<part>
<date>2024-05</date>
<extent unit="page">
<start>105</start>
<end>114</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T FrenchToxicityPrompts: a Large Benchmark for Evaluating and Mitigating Toxicity in French Texts
%A Brun, Caroline
%A Nikoulina, Vassilina
%Y Kumar, Ritesh
%Y Ojha, Atul Kr.
%Y Malmasi, Shervin
%Y Chakravarthi, Bharathi Raja
%Y Lahiri, Bornini
%Y Singh, Siddharth
%Y Ratan, Shyam
%S Proceedings of the Fourth Workshop on Threat, Aggression & Cyberbullying @ LREC-COLING-2024
%D 2024
%8 May
%I ELRA and ICCL
%C Torino, Italia
%F brun-nikoulina-2024-frenchtoxicityprompts
%X Large language models (LLMs) are increasingly popular but are also prone to generating biased, toxic or harmful language, which can have detrimental effects on individuals and communities. Although most effort is put to assess and mitigate toxicity in generated content, it is primarily concentrated on English, while it’s essential to consider other languages as well. To address this issue, we create and release FrenchToxicityPrompts, a dataset of 50K naturally occurring French prompts and their continuations, annotated with toxicity scores from a widely used toxicity classifier. We evaluate 14 different models from four prevalent open-sourced families of LLMs against our dataset to assess their potential toxicity across various dimensions. We hope that our contribution will foster future research on toxicity detection and mitigation beyond English.
%U https://aclanthology.org/2024.trac-1.12
%P 105-114
Markdown (Informal)
[FrenchToxicityPrompts: a Large Benchmark for Evaluating and Mitigating Toxicity in French Texts](https://aclanthology.org/2024.trac-1.12) (Brun & Nikoulina, TRAC-WS 2024)
ACL