@inproceedings{grace-etal-2023-olea,
  title     = {{OLEA}: Tool and Infrastructure for Offensive Language Error Analysis in {English}},
  author    = {Grace, Marie and
               Seabrum, Jay and
               Srinivas, Dananjay and
               Palmer, Alexis},
  editor    = {Croce, Danilo and
               Soldaini, Luca},
  booktitle = {Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics: System Demonstrations},
  month     = may,
  year      = {2023},
  address   = {Dubrovnik, Croatia},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.eacl-demo.24},
  doi       = {10.18653/v1/2023.eacl-demo.24},
  pages     = {209--218},
  abstract  = {State-of-the-art models for identifying offensive language often fail to generalize over more nuanced or implicit cases of offensive and hateful language. Understanding model performance on complex cases is key for building robust models that are effective in real-world settings. To help researchers efficiently evaluate their models, we introduce OLEA, a diagnostic, open-source, extensible Python library that provides easy-to-use tools for error analysis in the context of detecting offensive language in English. OLEA packages analyses and datasets proposed by prior scholarship, empowering researchers to build effective, explainable and generalizable offensive language classifiers.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="grace-etal-2023-olea">
<titleInfo>
<title>OLEA: Tool and Infrastructure for Offensive Language Error Analysis in English</title>
</titleInfo>
<name type="personal">
<namePart type="given">Marie</namePart>
<namePart type="family">Grace</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jay</namePart>
<namePart type="family">Seabrum</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dananjay</namePart>
<namePart type="family">Srinivas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alexis</namePart>
<namePart type="family">Palmer</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics: System Demonstrations</title>
</titleInfo>
<name type="personal">
<namePart type="given">Danilo</namePart>
<namePart type="family">Croce</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Luca</namePart>
<namePart type="family">Soldaini</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dubrovnik, Croatia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>State-of-the-art models for identifying offensive language often fail to generalize over more nuanced or implicit cases of offensive and hateful language. Understanding model performance on complex cases is key for building robust models that are effective in real-world settings. To help researchers efficiently evaluate their models, we introduce OLEA, a diagnostic, open-source, extensible Python library that provides easy-to-use tools for error analysis in the context of detecting offensive language in English. OLEA packages analyses and datasets proposed by prior scholarship, empowering researchers to build effective, explainable and generalizable offensive language classifiers.</abstract>
<identifier type="citekey">grace-etal-2023-olea</identifier>
<identifier type="doi">10.18653/v1/2023.eacl-demo.24</identifier>
<location>
<url>https://aclanthology.org/2023.eacl-demo.24</url>
</location>
<part>
<date>2023-05</date>
<extent unit="page">
<start>209</start>
<end>218</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T OLEA: Tool and Infrastructure for Offensive Language Error Analysis in English
%A Grace, Marie
%A Seabrum, Jay
%A Srinivas, Dananjay
%A Palmer, Alexis
%Y Croce, Danilo
%Y Soldaini, Luca
%S Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics: System Demonstrations
%D 2023
%8 May
%I Association for Computational Linguistics
%C Dubrovnik, Croatia
%F grace-etal-2023-olea
%X State-of-the-art models for identifying offensive language often fail to generalize over more nuanced or implicit cases of offensive and hateful language. Understanding model performance on complex cases is key for building robust models that are effective in real-world settings. To help researchers efficiently evaluate their models, we introduce OLEA, a diagnostic, open-source, extensible Python library that provides easy-to-use tools for error analysis in the context of detecting offensive language in English. OLEA packages analyses and datasets proposed by prior scholarship, empowering researchers to build effective, explainable and generalizable offensive language classifiers.
%R 10.18653/v1/2023.eacl-demo.24
%U https://aclanthology.org/2023.eacl-demo.24
%U https://doi.org/10.18653/v1/2023.eacl-demo.24
%P 209-218
Markdown (Informal)
[OLEA: Tool and Infrastructure for Offensive Language Error Analysis in English](https://aclanthology.org/2023.eacl-demo.24) (Grace et al., EACL 2023)
ACL