@inproceedings{quaremba-etal-2025-wetbench,
title = "{WETB}ench: A Benchmark for Detecting Task-Specific Machine-Generated Text on {W}ikipedia",
author = "Quaremba, Gerrit and
Black, Elizabeth and
Vrandecic, Denny and
Simperl, Elena",
editor = "Arora, Akhil and
Johnson, Isaac and
Kaffee, Lucie-Aim{\'e}e and
Kuo, Tzu-Sheng and
Piccardi, Tiziano and
Sen, Indira",
booktitle = "Proceedings of the 2nd Workshop on Advancing Natural Language Processing for Wikipedia (WikiNLP 2025)",
month = aug,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.wikinlp-1.6/",
doi = "10.18653/v1/2025.wikinlp-1.6",
pages = "10--30",
ISBN = "979-8-89176-284-8",
abstract = "Given Wikipedia{'}s role as a trusted source of high-quality, reliable content, there are growing concerns about the proliferation of low-quality machine-generated text (MGT) produced by large language models (LLMs) on its platform. Reliable detection of MGT is therefore essential, yet existing work primarily evaluates MGT detectors on generic generation tasks, rather than on tasks more commonly performed by Wikipedia editors. This misalignment can lead to poor generalisability when applied to real-world Wikipedia contexts.We introduce WETBench, a multilingual, multi-generator, and task-specific benchmark for MGT detection. We define three editing tasks empirically grounded in Wikipedia editors' perceived use cases for LLM-assisted editing: Paragraph Writing, Summarisation, and Text Style Transfer, which we implement using two new datasets across three languages. For each writing task, we evaluate three prompts, produce MGT across multiple generators using the best-performing prompt, and benchmark diverse detectors.We find that, across settings, training-based detectors achieve an average accuracy of 78{\%}, while zero-shot detectors average 58{\%}. These results demonstrate that detectors struggle with MGT in realistic generation scenarios and underscore the importance of evaluating such models on diverse, task-specific data to assess their reliability in editor-driven contexts."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="quaremba-etal-2025-wetbench">
<titleInfo>
<title>WETBench: A Benchmark for Detecting Task-Specific Machine-Generated Text on Wikipedia</title>
</titleInfo>
<name type="personal">
<namePart type="given">Gerrit</namePart>
<namePart type="family">Quaremba</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Elizabeth</namePart>
<namePart type="family">Black</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Denny</namePart>
<namePart type="family">Vrandecic</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Elena</namePart>
<namePart type="family">Simperl</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2nd Workshop on Advancing Natural Language Processing for Wikipedia (WikiNLP 2025)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Akhil</namePart>
<namePart type="family">Arora</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Isaac</namePart>
<namePart type="family">Johnson</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lucie-Aimée</namePart>
<namePart type="family">Kaffee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tzu-Sheng</namePart>
<namePart type="family">Kuo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tiziano</namePart>
<namePart type="family">Piccardi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Indira</namePart>
<namePart type="family">Sen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-284-8</identifier>
</relatedItem>
<abstract>Given Wikipedia’s role as a trusted source of high-quality, reliable content, there are growing concerns about the proliferation of low-quality machine-generated text (MGT) produced by large language models (LLMs) on its platform. Reliable detection of MGT is therefore essential, yet existing work primarily evaluates MGT detectors on generic generation tasks, rather than on tasks more commonly performed by Wikipedia editors. This misalignment can lead to poor generalisability when applied to real-world Wikipedia contexts. We introduce WETBench, a multilingual, multi-generator, and task-specific benchmark for MGT detection. We define three editing tasks empirically grounded in Wikipedia editors’ perceived use cases for LLM-assisted editing: Paragraph Writing, Summarisation, and Text Style Transfer, which we implement using two new datasets across three languages. For each writing task, we evaluate three prompts, produce MGT across multiple generators using the best-performing prompt, and benchmark diverse detectors. We find that, across settings, training-based detectors achieve an average accuracy of 78%, while zero-shot detectors average 58%. These results demonstrate that detectors struggle with MGT in realistic generation scenarios and underscore the importance of evaluating such models on diverse, task-specific data to assess their reliability in editor-driven contexts.</abstract>
<identifier type="citekey">quaremba-etal-2025-wetbench</identifier>
<identifier type="doi">10.18653/v1/2025.wikinlp-1.6</identifier>
<location>
<url>https://aclanthology.org/2025.wikinlp-1.6/</url>
</location>
<part>
<date>2025-08</date>
<extent unit="page">
<start>10</start>
<end>30</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T WETBench: A Benchmark for Detecting Task-Specific Machine-Generated Text on Wikipedia
%A Quaremba, Gerrit
%A Black, Elizabeth
%A Vrandecic, Denny
%A Simperl, Elena
%Y Arora, Akhil
%Y Johnson, Isaac
%Y Kaffee, Lucie-Aimée
%Y Kuo, Tzu-Sheng
%Y Piccardi, Tiziano
%Y Sen, Indira
%S Proceedings of the 2nd Workshop on Advancing Natural Language Processing for Wikipedia (WikiNLP 2025)
%D 2025
%8 August
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-284-8
%F quaremba-etal-2025-wetbench
%X Given Wikipedia’s role as a trusted source of high-quality, reliable content, there are growing concerns about the proliferation of low-quality machine-generated text (MGT) produced by large language models (LLMs) on its platform. Reliable detection of MGT is therefore essential, yet existing work primarily evaluates MGT detectors on generic generation tasks, rather than on tasks more commonly performed by Wikipedia editors. This misalignment can lead to poor generalisability when applied to real-world Wikipedia contexts. We introduce WETBench, a multilingual, multi-generator, and task-specific benchmark for MGT detection. We define three editing tasks empirically grounded in Wikipedia editors’ perceived use cases for LLM-assisted editing: Paragraph Writing, Summarisation, and Text Style Transfer, which we implement using two new datasets across three languages. For each writing task, we evaluate three prompts, produce MGT across multiple generators using the best-performing prompt, and benchmark diverse detectors. We find that, across settings, training-based detectors achieve an average accuracy of 78%, while zero-shot detectors average 58%. These results demonstrate that detectors struggle with MGT in realistic generation scenarios and underscore the importance of evaluating such models on diverse, task-specific data to assess their reliability in editor-driven contexts.
%R 10.18653/v1/2025.wikinlp-1.6
%U https://aclanthology.org/2025.wikinlp-1.6/
%U https://doi.org/10.18653/v1/2025.wikinlp-1.6
%P 10-30
Markdown (Informal)
[WETBench: A Benchmark for Detecting Task-Specific Machine-Generated Text on Wikipedia](https://aclanthology.org/2025.wikinlp-1.6/) (Quaremba et al., WikiNLP 2025)
ACL
Gerrit Quaremba, Elizabeth Black, Denny Vrandecic, and Elena Simperl. 2025. [WETBench: A Benchmark for Detecting Task-Specific Machine-Generated Text on Wikipedia](https://aclanthology.org/2025.wikinlp-1.6/). In *Proceedings of the 2nd Workshop on Advancing Natural Language Processing for Wikipedia (WikiNLP 2025)*, pages 10–30, Vienna, Austria. Association for Computational Linguistics.