@inproceedings{zhao-etal-2024-measuring,
    title = "Measuring the Inconsistency of Large Language Models in Preferential Ranking",
    author = "Zhao, Xiutian and
      Wang, Ke and
      Peng, Wei",
    editor = "Li, Sha and
      Li, Manling and
      Zhang, Michael JQ and
      Choi, Eunsol and
      Geva, Mor and
      Hase, Peter and
      Ji, Heng",
    booktitle = "Proceedings of the 1st Workshop on Towards Knowledgeable Language Models (KnowLLM 2024)",
    month = aug,
    year = "2024",
    address = "Bangkok, Thailand",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.knowllm-1.14",
    doi = "10.18653/v1/2024.knowllm-1.14",
    pages = "171--176",
    abstract = "Despite large language models{'} (LLMs{'}) recent advancements, their bias and hallucination issues persist, and their ability to offer consistent and preferential rankings remains underexplored. This study investigates the capacity of LLMs to provide consistent ordinal preferences, a crucial aspect in scenarios lacking absolute answers. We introduce a formalization of consistency based on order theory, outlining criteria such as transitivity, asymmetry, reversibility, and independence from irrelevant alternatives. Our diagnostic experiments on selected state-of-the-art LLMs reveal their inability to meet these criteria, indicating a strong positional bias and poor transitivity, with preferences easily swayed by irrelevant alternatives. These findings highlight a significant inconsistency in LLM-generated preferential rankings, underscoring the need for further research to address these limitations.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="zhao-etal-2024-measuring">
    <titleInfo>
      <title>Measuring the Inconsistency of Large Language Models in Preferential Ranking</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Xiutian</namePart>
      <namePart type="family">Zhao</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ke</namePart>
      <namePart type="family">Wang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Wei</namePart>
      <namePart type="family">Peng</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-08</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 1st Workshop on Towards Knowledgeable Language Models (KnowLLM 2024)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Sha</namePart>
        <namePart type="family">Li</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Manling</namePart>
        <namePart type="family">Li</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Michael</namePart>
        <namePart type="given">JQ</namePart>
        <namePart type="family">Zhang</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Eunsol</namePart>
        <namePart type="family">Choi</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Mor</namePart>
        <namePart type="family">Geva</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Peter</namePart>
        <namePart type="family">Hase</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Heng</namePart>
        <namePart type="family">Ji</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Bangkok, Thailand</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Despite large language models’ (LLMs’) recent advancements, their bias and hallucination issues persist, and their ability to offer consistent and preferential rankings remains underexplored. This study investigates the capacity of LLMs to provide consistent ordinal preferences, a crucial aspect in scenarios lacking absolute answers. We introduce a formalization of consistency based on order theory, outlining criteria such as transitivity, asymmetry, reversibility, and independence from irrelevant alternatives. Our diagnostic experiments on selected state-of-the-art LLMs reveal their inability to meet these criteria, indicating a strong positional bias and poor transitivity, with preferences easily swayed by irrelevant alternatives. These findings highlight a significant inconsistency in LLM-generated preferential rankings, underscoring the need for further research to address these limitations.</abstract>
    <identifier type="citekey">zhao-etal-2024-measuring</identifier>
    <identifier type="doi">10.18653/v1/2024.knowllm-1.14</identifier>
    <location>
      <url>https://aclanthology.org/2024.knowllm-1.14</url>
    </location>
    <part>
      <date>2024-08</date>
      <extent unit="page">
        <start>171</start>
        <end>176</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Measuring the Inconsistency of Large Language Models in Preferential Ranking
%A Zhao, Xiutian
%A Wang, Ke
%A Peng, Wei
%Y Li, Sha
%Y Li, Manling
%Y Zhang, Michael JQ
%Y Choi, Eunsol
%Y Geva, Mor
%Y Hase, Peter
%Y Ji, Heng
%S Proceedings of the 1st Workshop on Towards Knowledgeable Language Models (KnowLLM 2024)
%D 2024
%8 August
%I Association for Computational Linguistics
%C Bangkok, Thailand
%F zhao-etal-2024-measuring
%X Despite large language models’ (LLMs’) recent advancements, their bias and hallucination issues persist, and their ability to offer consistent and preferential rankings remains underexplored. This study investigates the capacity of LLMs to provide consistent ordinal preferences, a crucial aspect in scenarios lacking absolute answers. We introduce a formalization of consistency based on order theory, outlining criteria such as transitivity, asymmetry, reversibility, and independence from irrelevant alternatives. Our diagnostic experiments on selected state-of-the-art LLMs reveal their inability to meet these criteria, indicating a strong positional bias and poor transitivity, with preferences easily swayed by irrelevant alternatives. These findings highlight a significant inconsistency in LLM-generated preferential rankings, underscoring the need for further research to address these limitations.
%R 10.18653/v1/2024.knowllm-1.14
%U https://aclanthology.org/2024.knowllm-1.14
%U https://doi.org/10.18653/v1/2024.knowllm-1.14
%P 171-176
Markdown (Informal)
[Measuring the Inconsistency of Large Language Models in Preferential Ranking](https://aclanthology.org/2024.knowllm-1.14) (Zhao et al., KnowLLM-WS 2024)
ACL
Xiutian Zhao, Ke Wang, and Wei Peng. 2024. Measuring the Inconsistency of Large Language Models in Preferential Ranking. In Proceedings of the 1st Workshop on Towards Knowledgeable Language Models (KnowLLM 2024), pages 171–176, Bangkok, Thailand. Association for Computational Linguistics.
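
For readers of the abstract above: the four consistency criteria it names are standard order-theoretic properties of a strict preference relation. Below is a minimal LaTeX sketch of those properties under their usual definitions; the paper's exact formalization may differ.

\documentclass{article}
\usepackage{amsmath}
\begin{document}
% Standard order-theoretic consistency criteria for a strict preference
% relation \succ over a set of alternatives (a sketch; the paper's own
% formalization may differ in detail).
\begin{align*}
  \textbf{Transitivity:}  \quad & a \succ b \;\land\; b \succ c \;\Rightarrow\; a \succ c \\
  \textbf{Asymmetry:}     \quad & a \succ b \;\Rightarrow\; \lnot\,(b \succ a) \\
  \textbf{Reversibility:} \quad & \text{the elicited order over } \{a, b\} \text{ is unchanged} \\
                                & \text{when the presentation order of } a, b \text{ is swapped} \\
  \textbf{IIA:}           \quad & a \succ b \text{ within } S \;\Rightarrow\; a \succ b \text{ within } S \cup \{c\},\; c \notin S
\end{align*}
\end{document}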