@inproceedings{bhope-etal-2025-optiseq,
title = "{O}pti{S}eq: Ordering Examples On-The-Fly for In-Context Learning",
author = "Bhope, Rahul Atul and
Venkateswaran, Praveen and
Jayaram, K. R. and
Isahagian, Vatche and
Muthusamy, Vinod and
Venkatasubramanian, Nalini",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-emnlp.1353/",
pages = "24864--24887",
ISBN = "979-8-89176-335-7",
    abstract = "Developers using LLMs and LLM-based agents in their applications have provided plenty of anecdotal evidence that in-context learning (ICL) is fragile. In this paper, we show that in addition to the quantity and quality of examples, the order in which the in-context examples are listed in the prompt affects the output of the LLM and, consequently, its performance. While prior work has explored improving ICL through dataset-dependent techniques, we introduce OptiSeq, a purely inference-time, dataset-free optimization method that efficiently determines the best example order. OptiSeq leverages log probabilities of LLM-generated outputs to systematically prune the search space of possible orderings and recommend the best order(s) by distinguishing orderings that yield high levels of accuracy from those that underperform. Extensive empirical evaluation on multiple LLMs, datasets, and prompts demonstrates that OptiSeq improves accuracy by 5.5 - 10.5 percentage points across multiple tasks."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="bhope-etal-2025-optiseq">
<titleInfo>
<title>OptiSeq: Ordering Examples On-The-Fly for In-Context Learning</title>
</titleInfo>
<name type="personal">
<namePart type="given">Rahul</namePart>
<namePart type="given">Atul</namePart>
<namePart type="family">Bhope</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Praveen</namePart>
<namePart type="family">Venkateswaran</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">K</namePart>
<namePart type="given">R</namePart>
<namePart type="family">Jayaram</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vatche</namePart>
<namePart type="family">Isahagian</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vinod</namePart>
<namePart type="family">Muthusamy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nalini</namePart>
<namePart type="family">Venkatasubramanian</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Christos</namePart>
<namePart type="family">Christodoulopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tanmoy</namePart>
<namePart type="family">Chakraborty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolyn</namePart>
<namePart type="family">Rose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Violet</namePart>
<namePart type="family">Peng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-335-7</identifier>
</relatedItem>
<abstract>Developers using LLMs and LLM-based agents in their applications have provided plenty of anecdotal evidence that in-context learning (ICL) is fragile. In this paper, we show that in addition to the quantity and quality of examples, the order in which the in-context examples are listed in the prompt affects the output of the LLM and, consequently, its performance. While prior work has explored improving ICL through dataset-dependent techniques, we introduce OptiSeq, a purely inference-time, dataset-free optimization method that efficiently determines the best example order. OptiSeq leverages log probabilities of LLM-generated outputs to systematically prune the search space of possible orderings and recommend the best order(s) by distinguishing orderings that yield high levels of accuracy from those that underperform. Extensive empirical evaluation on multiple LLMs, datasets, and prompts demonstrates that OptiSeq improves accuracy by 5.5 - 10.5 percentage points across multiple tasks.</abstract>
<identifier type="citekey">bhope-etal-2025-optiseq</identifier>
<location>
<url>https://aclanthology.org/2025.findings-emnlp.1353/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>24864</start>
<end>24887</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T OptiSeq: Ordering Examples On-The-Fly for In-Context Learning
%A Bhope, Rahul Atul
%A Venkateswaran, Praveen
%A Jayaram, K. R.
%A Isahagian, Vatche
%A Muthusamy, Vinod
%A Venkatasubramanian, Nalini
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Findings of the Association for Computational Linguistics: EMNLP 2025
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-335-7
%F bhope-etal-2025-optiseq
%X Developers using LLMs and LLM-based agents in their applications have provided plenty of anecdotal evidence that in-context learning (ICL) is fragile. In this paper, we show that in addition to the quantity and quality of examples, the order in which the in-context examples are listed in the prompt affects the output of the LLM and, consequently, its performance. While prior work has explored improving ICL through dataset-dependent techniques, we introduce OptiSeq, a purely inference-time, dataset-free optimization method that efficiently determines the best example order. OptiSeq leverages log probabilities of LLM-generated outputs to systematically prune the search space of possible orderings and recommend the best order(s) by distinguishing orderings that yield high levels of accuracy from those that underperform. Extensive empirical evaluation on multiple LLMs, datasets, and prompts demonstrates that OptiSeq improves accuracy by 5.5 - 10.5 percentage points across multiple tasks.
%U https://aclanthology.org/2025.findings-emnlp.1353/
%P 24864-24887
Markdown (Informal)
[OptiSeq: Ordering Examples On-The-Fly for In-Context Learning](https://aclanthology.org/2025.findings-emnlp.1353/) (Bhope et al., Findings 2025)
ACL
- Rahul Atul Bhope, Praveen Venkateswaran, K. R. Jayaram, Vatche Isahagian, Vinod Muthusamy, and Nalini Venkatasubramanian. 2025. OptiSeq: Ordering Examples On-The-Fly for In-Context Learning. In Findings of the Association for Computational Linguistics: EMNLP 2025, pages 24864–24887, Suzhou, China. Association for Computational Linguistics.
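The abstract describes OptiSeq's core mechanism: score candidate orderings of the in-context examples by the log probability the LLM assigns to its generated outputs, prune the search space, and keep the best-scoring order(s). The following minimal Python sketch only illustrates that ranking idea; it is not the authors' implementation, and the scoring function, `keep_top` parameter, and all other names are hypothetical placeholders.

```python
import itertools
import math
from typing import Callable, Sequence

# Hypothetical scorer: given one ordering of the in-context examples,
# return the total log probability the LLM assigns to the outputs it
# generates for a small set of probe inputs. A real scorer would call an
# LLM API that exposes token log probabilities.
ScoreFn = Callable[[Sequence[str]], float]


def rank_orderings(examples: Sequence[str], score_fn: ScoreFn, keep_top: int = 3):
    """Enumerate orderings of the examples, score each with the supplied
    log-probability scorer, and return the best-scoring orderings.

    A real system would prune the factorial search space rather than
    enumerating it exhaustively; this sketch shows only the ranking step.
    """
    scored = [(score_fn(order), order) for order in itertools.permutations(examples)]
    scored.sort(key=lambda pair: pair[0], reverse=True)
    return scored[:keep_top]


if __name__ == "__main__":
    # Toy stand-in for an LLM log-probability scorer, so the sketch runs.
    def toy_score(order: Sequence[str]) -> float:
        return -sum(math.log(1 + i) * len(ex) for i, ex in enumerate(order))

    demos = ["example A", "ex B", "example C!"]
    for logprob, order in rank_orderings(demos, toy_score):
        print(f"{logprob:8.3f}  {order}")
```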