@inproceedings{chen-etal-2025-implicit,
    title     = {From Implicit Exploration to Structured Reasoning: Guideline and Refinement for {LLM}s},
    author    = {Chen, Jiaxiang and
                 Wang, Zhuo and
                 Zou, Mingxi and
                 Li, Zhucong and
                 Zhou, Zhijian and
                 Wang, Song and
                 Xu, Zenglin},
    editor    = {Christodoulopoulos, Christos and
                 Chakraborty, Tanmoy and
                 Rose, Carolyn and
                 Peng, Violet},
    booktitle = {Findings of the Association for Computational Linguistics: {EMNLP} 2025},
    month     = nov,
    year      = {2025},
    address   = {Suzhou, China},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2025.findings-emnlp.196/},
    doi       = {10.18653/v1/2025.findings-emnlp.196},
    pages     = {3672--3684},
    isbn      = {979-8-89176-335-7},
    abstract  = {Large language models (LLMs) have advanced general-purpose reasoning, showing strong performance across diverse tasks. However, existing methods often rely on implicit exploration, where the model follows stochastic and unguided reasoning paths{---}like walking without a map. This leads to unstable reasoning paths, lack of error correction, and limited learning from past experience. To address these issues, we propose a framework that shifts from implicit exploration to structured reasoning through guideline and refinement. First, we extract structured reasoning patterns from successful trajectories and reflective signals from failures. During inference, the model follows these guidelines step-by-step, with refinement applied after each step to correct errors and stabilize the reasoning process. Experiments on the Big-Bench Hard (BBH) benchmark show that our method consistently outperforms strong baselines across diverse reasoning tasks. Analysis reveals that stepwise execution, refinement, and experience-based learning improve stability and generalization. We further explore model collaboration during refinement, offering insights into cross-model interactions. Notably, structured reasoning guided by learned instructions matches or even surpasses knowledge distilled through SFT, highlighting its scalability and effectiveness.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="chen-etal-2025-implicit">
<titleInfo>
<title>From Implicit Exploration to Structured Reasoning: Guideline and Refinement for LLMs</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jiaxiang</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhuo</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mingxi</namePart>
<namePart type="family">Zou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhucong</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhijian</namePart>
<namePart type="family">Zhou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Song</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zenglin</namePart>
<namePart type="family">Xu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Christos</namePart>
<namePart type="family">Christodoulopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tanmoy</namePart>
<namePart type="family">Chakraborty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolyn</namePart>
<namePart type="family">Rose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Violet</namePart>
<namePart type="family">Peng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-335-7</identifier>
</relatedItem>
<abstract>Large language models (LLMs) have advanced general-purpose reasoning, showing strong performance across diverse tasks. However, existing methods often rely on implicit exploration, where the model follows stochastic and unguided reasoning paths—like walking without a map. This leads to unstable reasoning paths, lack of error correction, and limited learning from past experience. To address these issues, we propose a framework that shifts from implicit exploration to structured reasoning through guideline and refinement. First, we extract structured reasoning patterns from successful trajectories and reflective signals from failures. During inference, the model follows these guidelines step-by-step, with refinement applied after each step to correct errors and stabilize the reasoning process. Experiments on the Big-Bench Hard (BBH) benchmark show that our method consistently outperforms strong baselines across diverse reasoning tasks. Analysis reveals that stepwise execution, refinement, and experience-based learning improve stability and generalization. We further explore model collaboration during refinement, offering insights into cross-model interactions. Notably, structured reasoning guided by learned instructions matches or even surpasses knowledge distilled through SFT, highlighting its scalability and effectiveness.</abstract>
<identifier type="citekey">chen-etal-2025-implicit</identifier>
<identifier type="doi">10.18653/v1/2025.findings-emnlp.196</identifier>
<location>
<url>https://aclanthology.org/2025.findings-emnlp.196/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>3672</start>
<end>3684</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T From Implicit Exploration to Structured Reasoning: Guideline and Refinement for LLMs
%A Chen, Jiaxiang
%A Wang, Zhuo
%A Zou, Mingxi
%A Li, Zhucong
%A Zhou, Zhijian
%A Wang, Song
%A Xu, Zenglin
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Findings of the Association for Computational Linguistics: EMNLP 2025
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-335-7
%F chen-etal-2025-implicit
%X Large language models (LLMs) have advanced general-purpose reasoning, showing strong performance across diverse tasks. However, existing methods often rely on implicit exploration, where the model follows stochastic and unguided reasoning paths—like walking without a map. This leads to unstable reasoning paths, lack of error correction, and limited learning from past experience. To address these issues, we propose a framework that shifts from implicit exploration to structured reasoning through guideline and refinement. First, we extract structured reasoning patterns from successful trajectories and reflective signals from failures. During inference, the model follows these guidelines step-by-step, with refinement applied after each step to correct errors and stabilize the reasoning process. Experiments on the Big-Bench Hard (BBH) benchmark show that our method consistently outperforms strong baselines across diverse reasoning tasks. Analysis reveals that stepwise execution, refinement, and experience-based learning improve stability and generalization. We further explore model collaboration during refinement, offering insights into cross-model interactions. Notably, structured reasoning guided by learned instructions matches or even surpasses knowledge distilled through SFT, highlighting its scalability and effectiveness.
%R 10.18653/v1/2025.findings-emnlp.196
%U https://aclanthology.org/2025.findings-emnlp.196/
%U https://doi.org/10.18653/v1/2025.findings-emnlp.196
%P 3672-3684
Markdown (Informal)
[From Implicit Exploration to Structured Reasoning: Guideline and Refinement for LLMs](https://aclanthology.org/2025.findings-emnlp.196/) (Chen et al., Findings 2025)
ACL