@inproceedings{wu-etal-2024-chain,
    title = "Chain-of-Quizzes: Pedagogy-inspired Example Selection in In-Context-Learning",
    author = "Wu, Yiquan and
      Zhou, Anlai and
      Liu, Yuhang and
      Liu, Yifei and
      Jatowt, Adam and
      Lu, Weiming and
      Xiao, Jun and
      Kuang, Kun",
    editor = "Ku, Lun-Wei and
      Martins, Andre and
      Srikumar, Vivek",
    booktitle = "Findings of the Association for Computational Linguistics: ACL 2024",
    month = aug,
    year = "2024",
    address = "Bangkok, Thailand",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.findings-acl.603",
    doi = "10.18653/v1/2024.findings-acl.603",
    pages = "10136--10142",
    abstract = "In-context learning (ICL) has emerged as a powerful tool for enhancing large language models (LLMs) in addressing downstream tasks. In this paper, we explore the vital task of example selection in ICL by mimicking the human learning process. We propose a Chain-of-Quizzes (CoQ) framework inspired by educational theories such as Bruner{'}s Spiral Learning and Mastery Learning theory. Specifically, our framework employs LLMs to answer the quiz (the question in the example) to sift {`}good{'} examples, combines these examples iteratively with increasing complexity, and utilizes a final exam to gauge the combined example chains. Our extensive experiments on diverse reasoning datasets show the proposed approach outperforms baseline models. These findings underscore the framework{'}s potential for future research.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="wu-etal-2024-chain">
    <titleInfo>
      <title>Chain-of-Quizzes: Pedagogy-inspired Example Selection in In-Context-Learning</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Yiquan</namePart>
      <namePart type="family">Wu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Anlai</namePart>
      <namePart type="family">Zhou</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yuhang</namePart>
      <namePart type="family">Liu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yifei</namePart>
      <namePart type="family">Liu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Adam</namePart>
      <namePart type="family">Jatowt</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Weiming</namePart>
      <namePart type="family">Lu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jun</namePart>
      <namePart type="family">Xiao</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Kun</namePart>
      <namePart type="family">Kuang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-08</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: ACL 2024</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Lun-Wei</namePart>
        <namePart type="family">Ku</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Andre</namePart>
        <namePart type="family">Martins</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Vivek</namePart>
        <namePart type="family">Srikumar</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Bangkok, Thailand</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>In-context learning (ICL) has emerged as a powerful tool for enhancing large language models (LLMs) in addressing downstream tasks. In this paper, we explore the vital task of example selection in ICL by mimicking the human learning process. We propose a Chain-of-Quizzes (CoQ) framework inspired by educational theories such as Bruner’s Spiral Learning and Mastery Learning theory. Specifically, our framework employs LLMs to answer the quiz (the question in the example) to sift ‘good’ examples, combines these examples iteratively with increasing complexity, and utilizes a final exam to gauge the combined example chains. Our extensive experiments on diverse reasoning datasets show the proposed approach outperforms baseline models. These findings underscore the framework’s potential for future research.</abstract>
<identifier type="citekey">wu-etal-2024-chain</identifier>
<identifier type="doi">10.18653/v1/2024.findings-acl.603</identifier>
<location>
<url>https://aclanthology.org/2024.findings-acl.603</url>
</location>
<part>
<date>2024-08</date>
<extent unit="page">
<start>10136</start>
<end>10142</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Chain-of-Quizzes: Pedagogy-inspired Example Selection in In-Context-Learning
%A Wu, Yiquan
%A Zhou, Anlai
%A Liu, Yuhang
%A Liu, Yifei
%A Jatowt, Adam
%A Lu, Weiming
%A Xiao, Jun
%A Kuang, Kun
%Y Ku, Lun-Wei
%Y Martins, Andre
%Y Srikumar, Vivek
%S Findings of the Association for Computational Linguistics: ACL 2024
%D 2024
%8 August
%I Association for Computational Linguistics
%C Bangkok, Thailand
%F wu-etal-2024-chain
%X In-context learning (ICL) has emerged as a powerful tool for enhancing large language models (LLMs) in addressing downstream tasks. In this paper, we explore the vital task of example selection in ICL by mimicking the human learning process. We propose a Chain-of-Quizzes (CoQ) framework inspired by educational theories such as Bruner’s Spiral Learning and Mastery Learning theory. Specifically, our framework employs LLMs to answer the quiz (the question in the example) to sift ‘good’ examples, combines these examples iteratively with increasing complexity, and utilizes a final exam to gauge the combined example chains. Our extensive experiments on diverse reasoning datasets show the proposed approach outperforms baseline models. These findings underscore the framework’s potential for future research.
%R 10.18653/v1/2024.findings-acl.603
%U https://aclanthology.org/2024.findings-acl.603
%U https://doi.org/10.18653/v1/2024.findings-acl.603
%P 10136-10142
Markdown (Informal):
[Chain-of-Quizzes: Pedagogy-inspired Example Selection in In-Context-Learning](https://aclanthology.org/2024.findings-acl.603) (Wu et al., Findings 2024)

ACL:
Yiquan Wu, Anlai Zhou, Yuhang Liu, Yifei Liu, Adam Jatowt, Weiming Lu, Jun Xiao, and Kun Kuang. 2024. Chain-of-Quizzes: Pedagogy-inspired Example Selection in In-Context-Learning. In Findings of the Association for Computational Linguistics: ACL 2024, pages 10136–10142, Bangkok, Thailand. Association for Computational Linguistics.
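
For readers who want to experiment with the idea described in the abstract, a minimal Python sketch of a Chain-of-Quizzes-style selection loop follows. It is only a reading of the abstract, not the authors' implementation: the `Example` fields, the `llm_answer` stub, the complexity ordering, and the exam scoring are all assumptions introduced here for illustration; consult the paper for the actual CoQ procedure.

```python
# Hypothetical sketch of a Chain-of-Quizzes-style selection loop (not the
# authors' code). `llm_answer` is a stub standing in for any real LLM call.
from dataclasses import dataclass
from typing import List


@dataclass
class Example:
    question: str    # the "quiz" posed to the LLM
    answer: str      # gold answer used to check the quiz
    complexity: int  # assumed difficulty score, e.g. number of reasoning steps


def llm_answer(prompt: str) -> str:
    """Placeholder for a real LLM call; replace with an API request."""
    return ""


def format_chain(chain: List[Example]) -> str:
    """Render the examples selected so far as an in-context prompt prefix."""
    return "".join(f"Q: {ex.question}\nA: {ex.answer}\n\n" for ex in chain)


def passes_quiz(example: Example, prompt_prefix: str) -> bool:
    """Sift 'good' examples: keep one only if the LLM answers its quiz correctly."""
    prediction = llm_answer(prompt_prefix + f"Q: {example.question}\nA:")
    return prediction.strip() == example.answer.strip()


def build_chain(pool: List[Example], chain_len: int) -> List[Example]:
    """Combine quiz-passing examples iteratively, easiest first."""
    chain: List[Example] = []
    for ex in sorted(pool, key=lambda e: e.complexity):  # increasing complexity
        if passes_quiz(ex, format_chain(chain)):
            chain.append(ex)
        if len(chain) == chain_len:
            break
    return chain


def final_exam_score(chain: List[Example], exam: List[Example]) -> float:
    """Gauge a chain by how many held-out exam questions it helps the LLM solve."""
    prefix = format_chain(chain)
    correct = sum(passes_quiz(q, prefix) for q in exam)
    return correct / max(len(exam), 1)
```

Under these assumptions, one would build several candidate chains from the example pool and keep the one with the highest `final_exam_score` as the in-context prompt; how the paper actually scores and combines chains may differ.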