@inproceedings{ravikiran-etal-2025-indra,
title = "{INDRA}: Iterative Difficulty Refinement Attention for {MCQ} Difficulty Estimation for {I}ndic Languages",
author = "Ravikiran, Manikandan and
Saluja, Rohit and
Bhavsar, Arnav",
editor = "Bhattacharya, Arnab and
Goyal, Pawan and
Ghosh, Saptarshi and
Ghosh, Kripabandhu",
booktitle = "Proceedings of the 1st Workshop on Benchmarks, Harmonization, Annotation, and Standardization for Human-Centric AI in Indian Languages (BHASHA 2025)",
month = dec,
year = "2025",
address = "Mumbai, India",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.bhasha-1.4/",
pages = "37--51",
ISBN = "979-8-89176-313-5",
abstract = "Estimating the difficulty of multiple-choice questions (MCQs) is central to adaptive testing and learner modeling. We introduce \textbf{INDRA} (Iterative Difficulty Refinement Attention), a novel attention mechanism that unifies psychometric priors with neural refinement for Indic MCQ difficulty estimation. INDRA incorporates three key innovations: (i) \textit{IRT-informed initialization}, which assigns token-level discrimination and difficulty scores to embed psychometric interpretability; (ii) \textit{entropy-driven iterative refinement}, which progressively sharpens attention to mimic the human process of distractor elimination; and (iii) \textit{Indic Aware Graph Coupling}, which propagates plausibility across morphologically and semantically related tokens, a critical feature for Indic languages. Experiments on TEEMIL-H and TEEMIL-K datasets show that INDRA achieves consistent improvements, with absolute gains of up to +1.02 F1 and +1.68 F1 over state-of-the-art, while demonstrating through ablation studies that psychometric priors, entropy refinement, and graph coupling contribute complementary gains to accuracy and robustness."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ravikiran-etal-2025-indra">
<titleInfo>
<title>INDRA: Iterative Difficulty Refinement Attention for MCQ Difficulty Estimation for Indic Languages</title>
</titleInfo>
<name type="personal">
<namePart type="given">Manikandan</namePart>
<namePart type="family">Ravikiran</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rohit</namePart>
<namePart type="family">Saluja</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Arnav</namePart>
<namePart type="family">Bhavsar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 1st Workshop on Benchmarks, Harmonization, Annotation, and Standardization for Human-Centric AI in Indian Languages (BHASHA 2025)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Arnab</namePart>
<namePart type="family">Bhattacharya</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pawan</namePart>
<namePart type="family">Goyal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Saptarshi</namePart>
<namePart type="family">Ghosh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kripabandhu</namePart>
<namePart type="family">Ghosh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Mumbai, India</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-313-5</identifier>
</relatedItem>
<abstract>Estimating the difficulty of multiple-choice questions (MCQs) is central to adaptive testing and learner modeling. We introduce INDRA (Iterative Difficulty Refinement Attention), a novel attention mechanism that unifies psychometric priors with neural refinement for Indic MCQ difficulty estimation. INDRA incorporates three key innovations: (i) IRT-informed initialization, which assigns token-level discrimination and difficulty scores to embed psychometric interpretability; (ii) entropy-driven iterative refinement, which progressively sharpens attention to mimic the human process of distractor elimination; and (iii) Indic Aware Graph Coupling, which propagates plausibility across morphologically and semantically related tokens, a critical feature for Indic languages. Experiments on TEEMIL-H and TEEMIL-K datasets show that INDRA achieves consistent improvements, with absolute gains of up to +1.02 F1 and +1.68 F1 over state-of-the-art, while demonstrating through ablation studies that psychometric priors, entropy refinement, and graph coupling contribute complementary gains to accuracy and robustness.</abstract>
<identifier type="citekey">ravikiran-etal-2025-indra</identifier>
<location>
<url>https://aclanthology.org/2025.bhasha-1.4/</url>
</location>
<part>
<date>2025-12</date>
<extent unit="page">
<start>37</start>
<end>51</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T INDRA: Iterative Difficulty Refinement Attention for MCQ Difficulty Estimation for Indic Languages
%A Ravikiran, Manikandan
%A Saluja, Rohit
%A Bhavsar, Arnav
%Y Bhattacharya, Arnab
%Y Goyal, Pawan
%Y Ghosh, Saptarshi
%Y Ghosh, Kripabandhu
%S Proceedings of the 1st Workshop on Benchmarks, Harmonization, Annotation, and Standardization for Human-Centric AI in Indian Languages (BHASHA 2025)
%D 2025
%8 December
%I Association for Computational Linguistics
%C Mumbai, India
%@ 979-8-89176-313-5
%F ravikiran-etal-2025-indra
%X Estimating the difficulty of multiple-choice questions (MCQs) is central to adaptive testing and learner modeling. We introduce INDRA (Iterative Difficulty Refinement Attention), a novel attention mechanism that unifies psychometric priors with neural refinement for Indic MCQ difficulty estimation. INDRA incorporates three key innovations: (i) IRT-informed initialization, which assigns token-level discrimination and difficulty scores to embed psychometric interpretability; (ii) entropy-driven iterative refinement, which progressively sharpens attention to mimic the human process of distractor elimination; and (iii) Indic Aware Graph Coupling, which propagates plausibility across morphologically and semantically related tokens, a critical feature for Indic languages. Experiments on TEEMIL-H and TEEMIL-K datasets show that INDRA achieves consistent improvements, with absolute gains of up to +1.02 F1 and +1.68 F1 over state-of-the-art, while demonstrating through ablation studies that psychometric priors, entropy refinement, and graph coupling contribute complementary gains to accuracy and robustness.
%U https://aclanthology.org/2025.bhasha-1.4/
%P 37-51