@inproceedings{li-etal-2025-c2leva,
title = "{C}$^2${LEVA}: Toward Comprehensive and Contamination-Free Language Model Evaluation",
author = "Li, Yanyang and
Long, Wong Tin and
Hung, Cheung To and
Zhao, Jianqiao and
Zheng, Duo and
Wai, Liu Ka and
Lyu, Michael R. and
Wang, Liwei",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-acl.116/",
doi = "10.18653/v1/2025.findings-acl.116",
pages = "2283--2306",
ISBN = "979-8-89176-256-5",
abstract = "Recent advances in large language models (LLMs) have shown significant promise, yet their evaluation raises concerns, particularly regarding data contamination due to the lack of access to proprietary training data. To address this issue, we present C$^2$LEVA, a comprehensive bilingual benchmark featuring systematic contamination prevention. C$^2$LEVA firstly offers a holistic evaluation encompassing 22 tasks, each targeting a specific application or ability of LLMs, and secondly a trustworthy assessment due to our contamination-free tasks, ensured by a systematic contamination prevention strategy that fully automates test data renewal and enforces data protection during benchmark data release. Our large-scale evaluation of 15 open-source and proprietary models demonstrates the effectiveness of C$^2$LEVA."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="li-etal-2025-c2leva">
<titleInfo>
<title>C²LEVA: Toward Comprehensive and Contamination-Free Language Model Evaluation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yanyang</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wong</namePart>
<namePart type="given">Tin</namePart>
<namePart type="family">Long</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Cheung</namePart>
<namePart type="given">To</namePart>
<namePart type="family">Hung</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jianqiao</namePart>
<namePart type="family">Zhao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Duo</namePart>
<namePart type="family">Zheng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Liu</namePart>
<namePart type="given">Ka</namePart>
<namePart type="family">Wai</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Michael</namePart>
<namePart type="given">R</namePart>
<namePart type="family">Lyu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Liwei</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: ACL 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Wanxiang</namePart>
<namePart type="family">Che</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Joyce</namePart>
<namePart type="family">Nabende</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ekaterina</namePart>
<namePart type="family">Shutova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohammad</namePart>
<namePart type="given">Taher</namePart>
<namePart type="family">Pilehvar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-256-5</identifier>
</relatedItem>
<abstract>Recent advances in large language models (LLMs) have shown significant promise, yet their evaluation raises concerns, particularly regarding data contamination due to the lack of access to proprietary training data. To address this issue, we present C²LEVA, a comprehensive bilingual benchmark featuring systematic contamination prevention. C²LEVA firstly offers a holistic evaluation encompassing 22 tasks, each targeting a specific application or ability of LLMs, and secondly a trustworthy assessment due to our contamination-free tasks, ensured by a systematic contamination prevention strategy that fully automates test data renewal and enforces data protection during benchmark data release. Our large-scale evaluation of 15 open-source and proprietary models demonstrates the effectiveness of C²LEVA.</abstract>
<identifier type="citekey">li-etal-2025-c2leva</identifier>
<identifier type="doi">10.18653/v1/2025.findings-acl.116</identifier>
<location>
<url>https://aclanthology.org/2025.findings-acl.116/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>2283</start>
<end>2306</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T C²LEVA: Toward Comprehensive and Contamination-Free Language Model Evaluation
%A Li, Yanyang
%A Long, Wong Tin
%A Hung, Cheung To
%A Zhao, Jianqiao
%A Zheng, Duo
%A Wai, Liu Ka
%A Lyu, Michael R.
%A Wang, Liwei
%Y Che, Wanxiang
%Y Nabende, Joyce
%Y Shutova, Ekaterina
%Y Pilehvar, Mohammad Taher
%S Findings of the Association for Computational Linguistics: ACL 2025
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-256-5
%F li-etal-2025-c2leva
%X Recent advances in large language models (LLMs) have shown significant promise, yet their evaluation raises concerns, particularly regarding data contamination due to the lack of access to proprietary training data. To address this issue, we present C²LEVA, a comprehensive bilingual benchmark featuring systematic contamination prevention. C²LEVA firstly offers a holistic evaluation encompassing 22 tasks, each targeting a specific application or ability of LLMs, and secondly a trustworthy assessment due to our contamination-free tasks, ensured by a systematic contamination prevention strategy that fully automates test data renewal and enforces data protection during benchmark data release. Our large-scale evaluation of 15 open-source and proprietary models demonstrates the effectiveness of C²LEVA.
%R 10.18653/v1/2025.findings-acl.116
%U https://aclanthology.org/2025.findings-acl.116/
%U https://doi.org/10.18653/v1/2025.findings-acl.116
%P 2283-2306
Markdown (Informal)
[C2LEVA: Toward Comprehensive and Contamination-Free Language Model Evaluation](https://aclanthology.org/2025.findings-acl.116/) (Li et al., Findings 2025)
ACL
- Yanyang Li, Wong Tin Long, Cheung To Hung, Jianqiao Zhao, Duo Zheng, Liu Ka Wai, Michael R. Lyu, and Liwei Wang. 2025. C2LEVA: Toward Comprehensive and Contamination-Free Language Model Evaluation. In Findings of the Association for Computational Linguistics: ACL 2025, pages 2283–2306, Vienna, Austria. Association for Computational Linguistics.