@inproceedings{yamazaki-etal-2025-pricot,
title = "{PRIC}o{T}: Principle Retrieval and Injection from Inference Successes and Failures for {C}o{T} Improvement",
author = "Yamazaki, Yudai and
Takeda, Naoto and
Nishimura, Yasutaka and
Ikeda, Kazushi",
editor = "Flek, Lucie and
Narayan, Shashi and
Phương, L{\^e} Hồng and
Pei, Jiahuan",
booktitle = "Proceedings of the 18th International Natural Language Generation Conference",
month = oct,
year = "2025",
address = "Hanoi, Vietnam",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.inlg-main.35/",
pages = "576--595",
abstract = "In-Context Learning (ICL) approaches, such as Zero-Shot and Few-Shot prompting, allow Large Language Models (LLMs) to tackle reasoning tasks without additional fine-tuning. However, Zero-Shot prompting often struggles with more complex tasks, whereas Few-Shot prompting demands considerable manual effort and domain expertise to design effective prompts. Although existing work has attempted to alleviate these issues by extracting reasoning rules from carefully crafted, task-specific representative examples, creating or obtaining such examples can be impractical in real-world scenarios. In this paper, we propose a novel approach that enhances the inference accuracy by injecting reasoning principles extracted from QA data, without relying on representative Few-Shot exemplars. This offers a lightweight yet adaptive way to boost accuracy on complex reasoning tasks, while avoiding manual effort and the high exploration costs typical of prior methods. Experiments on benchmarks show that, using GPT-4o, our method outperforms similarity-based Few-Shot and Zero-Shot prompting methods on challenging benchmarks such as GPQA-diamond, achieving an absolute accuracy improvement of up to 2{\%} in scenarios where carefully crafted Few-Shot examples are unavailable."
}

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="yamazaki-etal-2025-pricot">
    <titleInfo>
        <title>PRICoT: Principle Retrieval and Injection from Inference Successes and Failures for CoT Improvement</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Yudai</namePart>
        <namePart type="family">Yamazaki</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Naoto</namePart>
        <namePart type="family">Takeda</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Yasutaka</namePart>
        <namePart type="family">Nishimura</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Kazushi</namePart>
        <namePart type="family">Ikeda</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2025-10</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the 18th International Natural Language Generation Conference</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Lucie</namePart>
            <namePart type="family">Flek</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Shashi</namePart>
            <namePart type="family">Narayan</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Lê</namePart>
            <namePart type="given">Hồng</namePart>
            <namePart type="family">Phương</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Jiahuan</namePart>
            <namePart type="family">Pei</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Hanoi, Vietnam</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>In-Context Learning (ICL) approaches, such as Zero-Shot and Few-Shot prompting, allow Large Language Models (LLMs) to tackle reasoning tasks without additional fine-tuning. However, Zero-Shot prompting often struggles with more complex tasks, whereas Few-Shot prompting demands considerable manual effort and domain expertise to design effective prompts. Although existing work has attempted to alleviate these issues by extracting reasoning rules from carefully crafted, task-specific representative examples, creating or obtaining such examples can be impractical in real-world scenarios. In this paper, we propose a novel approach that enhances inference accuracy by injecting reasoning principles extracted from QA data, without relying on representative Few-Shot exemplars. This offers a lightweight yet adaptive way to boost accuracy on complex reasoning tasks, while avoiding manual effort and the high exploration costs typical of prior methods. Experiments show that, using GPT-4o, our method outperforms similarity-based Few-Shot and Zero-Shot prompting methods on challenging benchmarks such as GPQA-diamond, achieving an absolute accuracy improvement of up to 2% in scenarios where carefully crafted Few-Shot examples are unavailable.</abstract>
    <identifier type="citekey">yamazaki-etal-2025-pricot</identifier>
    <location>
        <url>https://aclanthology.org/2025.inlg-main.35/</url>
    </location>
    <part>
        <date>2025-10</date>
        <extent unit="page">
            <start>576</start>
            <end>595</end>
        </extent>
    </part>
</mods>
</modsCollection>

%0 Conference Proceedings
%T PRICoT: Principle Retrieval and Injection from Inference Successes and Failures for CoT Improvement
%A Yamazaki, Yudai
%A Takeda, Naoto
%A Nishimura, Yasutaka
%A Ikeda, Kazushi
%Y Flek, Lucie
%Y Narayan, Shashi
%Y Phương, Lê Hồng
%Y Pei, Jiahuan
%S Proceedings of the 18th International Natural Language Generation Conference
%D 2025
%8 October
%I Association for Computational Linguistics
%C Hanoi, Vietnam
%F yamazaki-etal-2025-pricot
%X In-Context Learning (ICL) approaches, such as Zero-Shot and Few-Shot prompting, allow Large Language Models (LLMs) to tackle reasoning tasks without additional fine-tuning. However, Zero-Shot prompting often struggles with more complex tasks, whereas Few-Shot prompting demands considerable manual effort and domain expertise to design effective prompts. Although existing work has attempted to alleviate these issues by extracting reasoning rules from carefully crafted, task-specific representative examples, creating or obtaining such examples can be impractical in real-world scenarios. In this paper, we propose a novel approach that enhances inference accuracy by injecting reasoning principles extracted from QA data, without relying on representative Few-Shot exemplars. This offers a lightweight yet adaptive way to boost accuracy on complex reasoning tasks, while avoiding manual effort and the high exploration costs typical of prior methods. Experiments show that, using GPT-4o, our method outperforms similarity-based Few-Shot and Zero-Shot prompting methods on challenging benchmarks such as GPQA-diamond, achieving an absolute accuracy improvement of up to 2% in scenarios where carefully crafted Few-Shot examples are unavailable.
%U https://aclanthology.org/2025.inlg-main.35/
%P 576-595

Markdown (Informal)
[PRICoT: Principle Retrieval and Injection from Inference Successes and Failures for CoT Improvement](https://aclanthology.org/2025.inlg-main.35/) (Yamazaki et al., INLG 2025)

ACL
Yudai Yamazaki, Naoto Takeda, Yasutaka Nishimura, and Kazushi Ikeda. 2025. PRICoT: Principle Retrieval and Injection from Inference Successes and Failures for CoT Improvement. In Proceedings of the 18th International Natural Language Generation Conference, pages 576–595, Hanoi, Vietnam. Association for Computational Linguistics.