@inproceedings{peng-etal-2025-kele,
    title = "{KELE}: A Multi-Agent Framework for Structured Socratic Teaching with Large Language Models",
    author = "Peng, Xian and
      Yuan, Pan and
      Li, Dong and
      Cheng, Junlong and
      Fang, Qin and
      Liu, Zhi",
    editor = "Christodoulopoulos, Christos and
      Chakraborty, Tanmoy and
      Rose, Carolyn and
      Peng, Violet",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
    month = nov,
    year = "2025",
    address = "Suzhou, China",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.findings-emnlp.888/",
    pages = "16342--16362",
    ISBN = "979-8-89176-335-7",
    abstract = "Socratic teaching, known for its emphasis on heuristic questioning and deep thinking, has demonstrated significant advantages in promoting students' cognitive development. However, traditional Socratic teaching places high demands on teachers' expertise and real-time feedback capabilities, making it difficult to scale in large educational settings. Recent breakthroughs in large language models (LLMs) in natural language generation and dialogue comprehension offer the potential for automated Socratic teaching. In this paper, we propose Knowledge-Enlightened Learning Enhanced by LLMs (KELE), a novel multi-agent framework for structured Socratic teaching with LLMs. KELE constructs a structured Socratic teaching rule system (SocRule) and a ``consultant{--}teacher'' multi-agent collaborative teaching mechanism, in which two LLMs respectively take charge of teaching planning and execution, ensuring a logically coherent and hierarchically structured Socratic teaching process. We also construct SocratDataset, a structured Socratic teaching dataset covering 34 teaching strategies and over 42,000 dialogue turns, and train SocratTeachLLM, a specialized LLM for Socratic teaching tasks. Additionally, we build a comprehensive Socratic teaching quality evaluation system for LLMs, covering 9 dimensions from single-turn dialogue to multi-turn teaching processes. Experimental results show that SocratTeachLLM significantly outperforms GPT-4o, which has a much larger parameter size, across all Socratic teaching capabilities."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="peng-etal-2025-kele">
    <titleInfo>
      <title>KELE: A Multi-Agent Framework for Structured Socratic Teaching with Large Language Models</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Xian</namePart>
      <namePart type="family">Peng</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Pan</namePart>
      <namePart type="family">Yuan</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Dong</namePart>
      <namePart type="family">Li</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Junlong</namePart>
      <namePart type="family">Cheng</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Qin</namePart>
      <namePart type="family">Fang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Zhi</namePart>
      <namePart type="family">Liu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2025-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: EMNLP 2025</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Christos</namePart>
        <namePart type="family">Christodoulopoulos</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Tanmoy</namePart>
        <namePart type="family">Chakraborty</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Carolyn</namePart>
        <namePart type="family">Rose</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Violet</namePart>
        <namePart type="family">Peng</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Suzhou, China</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
      <identifier type="isbn">979-8-89176-335-7</identifier>
    </relatedItem>
    <abstract>Socratic teaching, known for its emphasis on heuristic questioning and deep thinking, has demonstrated significant advantages in promoting students’ cognitive development. However, traditional Socratic teaching places high demands on teachers’ expertise and real-time feedback capabilities, making it difficult to scale in large educational settings. Recent breakthroughs in large language models (LLMs) in natural language generation and dialogue comprehension offer the potential for automated Socratic teaching. In this paper, we propose Knowledge-Enlightened Learning Enhanced by LLMs (KELE), a novel multi-agent framework for structured Socratic teaching with LLMs. KELE constructs a structured Socratic teaching rule system (SocRule) and a “consultant–teacher” multi-agent collaborative teaching mechanism, in which two LLMs respectively take charge of teaching planning and execution, ensuring a logically coherent and hierarchically structured Socratic teaching process. We also construct SocratDataset, a structured Socratic teaching dataset covering 34 teaching strategies and over 42,000 dialogue turns, and train SocratTeachLLM, a specialized LLM for Socratic teaching tasks. Additionally, we build a comprehensive Socratic teaching quality evaluation system for LLMs, covering 9 dimensions from single-turn dialogue to multi-turn teaching processes. Experimental results show that SocratTeachLLM significantly outperforms GPT-4o, which has a much larger parameter size, across all Socratic teaching capabilities.</abstract>
    <identifier type="citekey">peng-etal-2025-kele</identifier>
    <location>
      <url>https://aclanthology.org/2025.findings-emnlp.888/</url>
    </location>
    <part>
      <date>2025-11</date>
      <extent unit="page">
        <start>16342</start>
        <end>16362</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T KELE: A Multi-Agent Framework for Structured Socratic Teaching with Large Language Models
%A Peng, Xian
%A Yuan, Pan
%A Li, Dong
%A Cheng, Junlong
%A Fang, Qin
%A Liu, Zhi
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Findings of the Association for Computational Linguistics: EMNLP 2025
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-335-7
%F peng-etal-2025-kele
%X Socratic teaching, known for its emphasis on heuristic questioning and deep thinking, has demonstrated significant advantages in promoting students’ cognitive development. However, traditional Socratic teaching places high demands on teachers’ expertise and real-time feedback capabilities, making it difficult to scale in large educational settings. Recent breakthroughs in large language models (LLMs) in natural language generation and dialogue comprehension offer the potential for automated Socratic teaching. In this paper, we propose Knowledge-Enlightened Learning Enhanced by LLMs (KELE), a novel multi-agent framework for structured Socratic teaching with LLMs. KELE constructs a structured Socratic teaching rule system (SocRule) and a “consultant–teacher” multi-agent collaborative teaching mechanism, in which two LLMs respectively take charge of teaching planning and execution, ensuring a logically coherent and hierarchically structured Socratic teaching process. We also construct SocratDataset, a structured Socratic teaching dataset covering 34 teaching strategies and over 42,000 dialogue turns, and train SocratTeachLLM, a specialized LLM for Socratic teaching tasks. Additionally, we build a comprehensive Socratic teaching quality evaluation system for LLMs, covering 9 dimensions from single-turn dialogue to multi-turn teaching processes. Experimental results show that SocratTeachLLM significantly outperforms GPT-4o, which has a much larger parameter size, across all Socratic teaching capabilities.
%U https://aclanthology.org/2025.findings-emnlp.888/
%P 16342-16362
Markdown (Informal)
[KELE: A Multi-Agent Framework for Structured Socratic Teaching with Large Language Models](https://aclanthology.org/2025.findings-emnlp.888/) (Peng et al., Findings 2025)
ACL

Xian Peng, Pan Yuan, Dong Li, Junlong Cheng, Qin Fang, and Zhi Liu. 2025. [KELE: A Multi-Agent Framework for Structured Socratic Teaching with Large Language Models](https://aclanthology.org/2025.findings-emnlp.888/). In *Findings of the Association for Computational Linguistics: EMNLP 2025*, pages 16342–16362, Suzhou, China. Association for Computational Linguistics.