@inproceedings{hu-lee-2026-hatexscore,
title = "{H}ate{XS}core: A Metric Suite for Evaluating Reasoning Quality in Hate Speech Explanations",
author = "Hu, Yujia and
Lee, Roy Ka-Wei",
editor = "Demberg, Vera and
Inui, Kentaro and
Marquez, Llu{\'i}s",
booktitle = "Proceedings of the 19th Conference of the {E}uropean Chapter of the {A}ssociation for {C}omputational {L}inguistics (Volume 1: Long Papers)",
month = mar,
year = "2026",
address = "Rabat, Morocco",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2026.eacl-long.198/",
pages = "4221--4240",
ISBN = "979-8-89176-380-7",
abstract = "Hateful speech detection is a key component of content moderation, yet current evaluation frameworks rarely assess why a text is deemed hateful. We introduce HateXScore, a four-component metric suite designed to evaluate the reasoning quality of model explanations. It assesses (i) conclusion explicitness, (ii) faithfulness and causal grounding of quoted spans, (iii) protected group identification (policy-configurable), and (iv) logical consistency among these elements. Evaluated on six diverse hate speech datasets, HateXScore reveals interpretability failures and annotation inconsistencies that are invisible to standard metrics like Accuracy or F1. Moreover, human evaluation shows strong agreement with HateXScore, validating it as a practical tool for trustworthy and transparent moderation. Disclaimer: This paper contains sensitive content that may be disturbing to some readers."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="hu-lee-2026-hatexscore">
<titleInfo>
<title>HateXScore: A Metric Suite for Evaluating Reasoning Quality in Hate Speech Explanations</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yujia</namePart>
<namePart type="family">Hu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Roy</namePart>
<namePart type="given">Ka-Wei</namePart>
<namePart type="family">Lee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2026-03</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 19th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Vera</namePart>
<namePart type="family">Demberg</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kentaro</namePart>
<namePart type="family">Inui</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lluís</namePart>
<namePart type="family">Marquez</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Rabat, Morocco</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-380-7</identifier>
</relatedItem>
<abstract>Hateful speech detection is a key component of content moderation, yet current evaluation frameworks rarely assess why a text is deemed hateful. We introduce HateXScore, a four-component metric suite designed to evaluate the reasoning quality of model explanations. It assesses (i) conclusion explicitness, (ii) faithfulness and causal grounding of quoted spans, (iii) protected group identification (policy-configurable), and (iv) logical consistency among these elements. Evaluated on six diverse hate speech datasets, HateXScore reveals interpretability failures and annotation inconsistencies that are invisible to standard metrics like Accuracy or F1. Moreover, human evaluation shows strong agreement with HateXScore, validating it as a practical tool for trustworthy and transparent moderation. Disclaimer: This paper contains sensitive content that may be disturbing to some readers.</abstract>
<identifier type="citekey">hu-lee-2026-hatexscore</identifier>
<location>
<url>https://aclanthology.org/2026.eacl-long.198/</url>
</location>
<part>
<date>2026-03</date>
<extent unit="page">
<start>4221</start>
<end>4240</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T HateXScore: A Metric Suite for Evaluating Reasoning Quality in Hate Speech Explanations
%A Hu, Yujia
%A Lee, Roy Ka-Wei
%Y Demberg, Vera
%Y Inui, Kentaro
%Y Marquez, Lluís
%S Proceedings of the 19th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers)
%D 2026
%8 March
%I Association for Computational Linguistics
%C Rabat, Morocco
%@ 979-8-89176-380-7
%F hu-lee-2026-hatexscore
%X Hateful speech detection is a key component of content moderation, yet current evaluation frameworks rarely assess why a text is deemed hateful. We introduce HateXScore, a four-component metric suite designed to evaluate the reasoning quality of model explanations. It assesses (i) conclusion explicitness, (ii) faithfulness and causal grounding of quoted spans, (iii) protected group identification (policy-configurable), and (iv) logical consistency among these elements. Evaluated on six diverse hate speech datasets, HateXScore reveals interpretability failures and annotation inconsistencies that are invisible to standard metrics like Accuracy or F1. Moreover, human evaluation shows strong agreement with HateXScore, validating it as a practical tool for trustworthy and transparent moderation. Disclaimer: This paper contains sensitive content that may be disturbing to some readers.
%U https://aclanthology.org/2026.eacl-long.198/
%P 4221-4240
Markdown (Informal)
[HateXScore: A Metric Suite for Evaluating Reasoning Quality in Hate Speech Explanations](https://aclanthology.org/2026.eacl-long.198/) (Hu & Lee, EACL 2026)
ACL