@inproceedings{shekhar-etal-2026-integrity,
title = "Integrity Shield A System for Ethical {AI} Use {\&} Authorship Transparency in Assessments",
author = "Shekhar, Ashish Raj and
Agarwal, Shiven and
Bordoloi, Priyanuj and
Shah, Yash and
Anvekar, Tejas and
Gupta, Vivek",
editor = "Croce, Danilo and
Leidner, Jochen and
Moosavi, Nafise Sadat",
booktitle = "Proceedings of the 19th Conference of the {E}uropean Chapter of the {A}ssociation for {C}omputational {L}inguistics (Volume 3: System Demonstrations)",
month = mar,
year = "2026",
  address = "Rabat, Morocco",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2026.eacl-demo.29/",
pages = "417--427",
ISBN = "979-8-89176-382-1",
  abstract = "Multi-Modal Large Language Models (MLLMs) can now solve entire exams directly from uploaded PDF assessments, raising urgent concerns about academic integrity and the reliability of grades and credentials. Existing watermarking techniques either operate at the token level or assume control over the model{'}s decoding process, making them ineffective when students query proprietary black-box systems using instructor-provided documents. We present INTEGRITYSHIELD, a document-layer watermarking system that embeds schema-aware, item-level watermarks into assessment PDFs while keeping their human-visible appearance unchanged. These watermarks consistently prevent MLLMs from answering shielded exam PDFs and encode stable, item-level signatures that can be reliably recovered from model or student responses. Across 30 question papers spanning STEM, humanities, and medical reasoning, INTEGRITYSHIELD achieves exceptionally high prevention (91-94{\%} exam-level blocking) and strong detection reliability (89-93{\%} signature retrieval) across four commercial MLLMs. Our demo showcases an interactive interface where instructors upload an exam, preview watermark behavior, and inspect pre/post AI performance and authorship evidence."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="shekhar-etal-2026-integrity">
<titleInfo>
<title>Integrity Shield A System for Ethical AI Use &amp; Authorship Transparency in Assessments</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ashish</namePart>
<namePart type="given">Raj</namePart>
<namePart type="family">Shekhar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shiven</namePart>
<namePart type="family">Agarwal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Priyanuj</namePart>
<namePart type="family">Bordoloi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yash</namePart>
<namePart type="family">Shah</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tejas</namePart>
<namePart type="family">Anvekar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vivek</namePart>
<namePart type="family">Gupta</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2026-03</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 19th Conference of the European Chapter of the Association for Computational Linguistics (Volume 3: System Demonstrations)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Danilo</namePart>
<namePart type="family">Croce</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jochen</namePart>
<namePart type="family">Leidner</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nafise</namePart>
<namePart type="given">Sadat</namePart>
<namePart type="family">Moosavi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Rabat, Morocco</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-382-1</identifier>
</relatedItem>
<abstract>Multi-Modal Large Language Models (MLLMs) can now solve entire exams directly from uploaded PDF assessments, raising urgent concerns about academic integrity and the reliability of grades and credentials. Existing watermarking techniques either operate at the token level or assume control over the model’s decoding process, making them ineffective when students query proprietary black-box systems using instructor-provided documents. We present INTEGRITYSHIELD, a document-layer watermarking system that embeds schema-aware, item-level watermarks into assessment PDFs while keeping their human-visible appearance unchanged. These watermarks consistently prevent MLLMs from answering shielded exam PDFs and encode stable, item-level signatures that can be reliably recovered from model or student responses. Across 30 question papers spanning STEM, humanities, and medical reasoning, INTEGRITYSHIELD achieves exceptionally high prevention (91-94% exam-level blocking) and strong detection reliability (89-93% signature retrieval) across four commercial MLLMs. Our demo showcases an interactive interface where instructors upload an exam, preview watermark behavior, and inspect pre/post AI performance and authorship evidence.</abstract>
<identifier type="citekey">shekhar-etal-2026-integrity</identifier>
<location>
<url>https://aclanthology.org/2026.eacl-demo.29/</url>
</location>
<part>
<date>2026-03</date>
<extent unit="page">
<start>417</start>
<end>427</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Integrity Shield A System for Ethical AI Use & Authorship Transparency in Assessments
%A Shekhar, Ashish Raj
%A Agarwal, Shiven
%A Bordoloi, Priyanuj
%A Shah, Yash
%A Anvekar, Tejas
%A Gupta, Vivek
%Y Croce, Danilo
%Y Leidner, Jochen
%Y Moosavi, Nafise Sadat
%S Proceedings of the 19th Conference of the European Chapter of the Association for Computational Linguistics (Volume 3: System Demonstrations)
%D 2026
%8 March
%I Association for Computational Linguistics
%C Rabat, Morocco
%@ 979-8-89176-382-1
%F shekhar-etal-2026-integrity
%X Multi-Modal Large Language Models (MLLMs) can now solve entire exams directly from uploaded PDF assessments, raising urgent concerns about academic integrity and the reliability of grades and credentials. Existing watermarking techniques either operate at the token level or assume control over the model’s decoding process, making them ineffective when students query proprietary black-box systems using instructor-provided documents. We present INTEGRITYSHIELD, a document-layer watermarking system that embeds schema-aware, item-level watermarks into assessment PDFs while keeping their human-visible appearance unchanged. These watermarks consistently prevent MLLMs from answering shielded exam PDFs and encode stable, item-level signatures that can be reliably recovered from model or student responses. Across 30 question papers spanning STEM, humanities, and medical reasoning, INTEGRITYSHIELD achieves exceptionally high prevention (91-94% exam-level blocking) and strong detection reliability (89-93% signature retrieval) across four commercial MLLMs. Our demo showcases an interactive interface where instructors upload an exam, preview watermark behavior, and inspect pre/post AI performance and authorship evidence.
%U https://aclanthology.org/2026.eacl-demo.29/
%P 417-427
Markdown (Informal)
[Integrity Shield A System for Ethical AI Use & Authorship Transparency in Assessments](https://aclanthology.org/2026.eacl-demo.29/) (Shekhar et al., EACL 2026)
ACL