@inproceedings{liu-etal-2025-coranking,
title = "{C}o{R}anking: Collaborative Ranking with Small and Large Ranking Agents",
author = "Liu, Wenhan and
Ma, Xinyu and
Zhu, Yutao and
Su, Lixin and
Wang, Shuaiqiang and
Yin, Dawei and
Dou, Zhicheng",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-emnlp.273/",
pages = "5098--5110",
ISBN = "979-8-89176-335-7",
abstract = "Listwise ranking based on Large Language Models (LLMs) has achieved state-of-the-art performance in Information Retrieval (IR).However, their effectiveness often depends on LLMs with massive parameter scales and computationally expensive sliding window processing, leading to substantial efficiency bottlenecks. In this paper, we propose a Collaborative Ranking framework (\textbf{CoRanking}) for LLM-based listwise ranking.Specifically, we strategically combine an efficient \textit{small} reranker and an effective \textit{large} reranker for collaborative ranking.The \textit{small} reranker performs initial passage ranking, effectively filtering the passage set to a condensed top-k list (e.g., top-20 passages), and the \textit{large} reranker (with stronger ranking capability) then reranks only this condensed subset rather than the full list, significantly improving efficiency. We further address that directly passing the top-ranked passages from the small reranker to the large reranker is suboptimal because of the LLM{'}s strong positional bias in processing input sequences. To resolve this issue, we propose a passage order adjuster learned by RL that dynamically reorders the top passages returned by the small reranker to better align with the large LLM{'}s input preferences. Our extensive experiments across three IR benchmarks demonstrate that CoRanking achieves superior efficiency, reducing ranking latency by approximately 70{\%} while simultaneously improving effectiveness, compared to the standalone large reranker."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="liu-etal-2025-coranking">
<titleInfo>
<title>CoRanking: Collaborative Ranking with Small and Large Ranking Agents</title>
</titleInfo>
<name type="personal">
<namePart type="given">Wenhan</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xinyu</namePart>
<namePart type="family">Ma</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yutao</namePart>
<namePart type="family">Zhu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lixin</namePart>
<namePart type="family">Su</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shuaiqiang</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dawei</namePart>
<namePart type="family">Yin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhicheng</namePart>
<namePart type="family">Dou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Christos</namePart>
<namePart type="family">Christodoulopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tanmoy</namePart>
<namePart type="family">Chakraborty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolyn</namePart>
<namePart type="family">Rose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Violet</namePart>
<namePart type="family">Peng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-335-7</identifier>
</relatedItem>
<abstract>Listwise ranking based on Large Language Models (LLMs) has achieved state-of-the-art performance in Information Retrieval (IR).However, their effectiveness often depends on LLMs with massive parameter scales and computationally expensive sliding window processing, leading to substantial efficiency bottlenecks. In this paper, we propose a Collaborative Ranking framework (CoRanking) for LLM-based listwise ranking.Specifically, we strategically combine an efficient small reranker and an effective large reranker for collaborative ranking.The small reranker performs initial passage ranking, effectively filtering the passage set to a condensed top-k list (e.g., top-20 passages), and the large reranker (with stronger ranking capability) then reranks only this condensed subset rather than the full list, significantly improving efficiency. We further address that directly passing the top-ranked passages from the small reranker to the large reranker is suboptimal because of the LLM’s strong positional bias in processing input sequences. To resolve this issue, we propose a passage order adjuster learned by RL that dynamically reorders the top passages returned by the small reranker to better align with the large LLM’s input preferences. Our extensive experiments across three IR benchmarks demonstrate that CoRanking achieves superior efficiency, reducing ranking latency by approximately 70% while simultaneously improving effectiveness, compared to the standalone large reranker.</abstract>
<identifier type="citekey">liu-etal-2025-coranking</identifier>
<location>
<url>https://aclanthology.org/2025.findings-emnlp.273/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>5098</start>
<end>5110</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T CoRanking: Collaborative Ranking with Small and Large Ranking Agents
%A Liu, Wenhan
%A Ma, Xinyu
%A Zhu, Yutao
%A Su, Lixin
%A Wang, Shuaiqiang
%A Yin, Dawei
%A Dou, Zhicheng
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Findings of the Association for Computational Linguistics: EMNLP 2025
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-335-7
%F liu-etal-2025-coranking
%X Listwise ranking based on Large Language Models (LLMs) has achieved state-of-the-art performance in Information Retrieval (IR). However, its effectiveness often depends on LLMs with massive parameter scales and computationally expensive sliding window processing, leading to substantial efficiency bottlenecks. In this paper, we propose a Collaborative Ranking framework (CoRanking) for LLM-based listwise ranking. Specifically, we strategically combine an efficient small reranker and an effective large reranker for collaborative ranking. The small reranker performs initial passage ranking, effectively filtering the passage set to a condensed top-k list (e.g., top-20 passages), and the large reranker (with stronger ranking capability) then reranks only this condensed subset rather than the full list, significantly improving efficiency. We further observe that directly passing the top-ranked passages from the small reranker to the large reranker is suboptimal because of the LLM’s strong positional bias in processing input sequences. To resolve this issue, we propose a passage order adjuster learned by RL that dynamically reorders the top passages returned by the small reranker to better align with the large LLM’s input preferences. Our extensive experiments across three IR benchmarks demonstrate that CoRanking achieves superior efficiency, reducing ranking latency by approximately 70% while simultaneously improving effectiveness, compared to the standalone large reranker.
%U https://aclanthology.org/2025.findings-emnlp.273/
%P 5098-5110
Markdown (Informal)
[CoRanking: Collaborative Ranking with Small and Large Ranking Agents](https://aclanthology.org/2025.findings-emnlp.273/) (Liu et al., Findings 2025)
ACL
Wenhan Liu, Xinyu Ma, Yutao Zhu, Lixin Su, Shuaiqiang Wang, Dawei Yin, and Zhicheng Dou. 2025. CoRanking: Collaborative Ranking with Small and Large Ranking Agents. In Findings of the Association for Computational Linguistics: EMNLP 2025, pages 5098–5110, Suzhou, China. Association for Computational Linguistics.
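
The records above carry only the metadata; as a reading aid, the following is a minimal sketch of the two-stage pipeline the abstract describes. All names here (`coranking`, `small_rerank`, `large_rerank`, `adjust_order`, the choice of `k=20`) are hypothetical placeholders inferred from the abstract, not the paper's actual API; in particular, the RL training of the order adjuster and the sliding-window details of the small reranker are not shown.

```python
from typing import Callable, List, Sequence

# Each ranker/adjuster takes (query, passages) and returns a permutation of
# indices into the passage list it received, from most to least relevant.
Ranker = Callable[[str, Sequence[str]], List[int]]

def coranking(
    query: str,
    passages: Sequence[str],
    small_rerank: Ranker,   # efficient small listwise reranker (stage 1)
    large_rerank: Ranker,   # effective large listwise reranker (stage 2)
    adjust_order: Ranker,   # RL-trained passage order adjuster (per abstract)
    k: int = 20,            # size of the condensed top-k list, e.g. top-20
) -> List[int]:
    """Collaborative two-stage reranking, sketched from the abstract."""
    # Stage 1: the small reranker ranks the full candidate list.
    first_pass = small_rerank(query, passages)

    # Condense to the top-k list; only this subset goes to the large model.
    top_k = first_pass[:k]
    top_k_texts = [passages[i] for i in top_k]

    # Reorder the top-k passages to better match the large LLM's positional
    # preferences before the second pass (the abstract's order adjuster).
    adjusted = [top_k[i] for i in adjust_order(query, top_k_texts)]

    # Stage 2: the large reranker reranks only the condensed subset.
    second_pass = large_rerank(query, [passages[i] for i in adjusted])
    refined_top = [adjusted[i] for i in second_pass]

    # Final ranking: refined top-k followed by the small reranker's tail.
    return refined_top + first_pass[k:]
```

With `adjust_order` set to the identity permutation (`lambda q, ps: list(range(len(ps)))`), this reduces to a plain small-then-large cascade, which the abstract reports is suboptimal because of the large LLM's positional bias; the learned adjuster is what closes that gap.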