@article{resnik-2025-large,
title = "Large Language Models Are Biased Because They Are Large Language Models",
author = "Resnik, Philip",
journal = "Computational Linguistics",
volume = "51",
number = "3",
month = sep,
year = "2025",
address = "Cambridge, MA",
publisher = "MIT Press",
url = "https://aclanthology.org/2025.cl-3.6/",
doi = "10.1162/coli_a_00558",
pages = "885--906",
abstract = "This position paper{'}s primary goal is to provoke thoughtful discussion about the relationship between bias and fundamental properties of large language models (LLMs). I do this by seeking to convince the reader that harmful biases are an inevitable consequence arising from the design of any large language model as LLMs are currently formulated. To the extent that this is true, it suggests that the problem of harmful bias cannot be properly addressed without a serious reconsideration of AI driven by LLMs, going back to the foundational assumptions underlying their design."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="resnik-2025-large">
<titleInfo>
<title>Large Language Models Are Biased Because They Are Large Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Philip</namePart>
<namePart type="family">Resnik</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<genre authority="bibutilsgt">journal article</genre>
<relatedItem type="host">
<titleInfo>
<title>Computational Linguistics</title>
</titleInfo>
<originInfo>
<issuance>continuing</issuance>
<publisher>MIT Press</publisher>
<place>
<placeTerm type="text">Cambridge, MA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">periodical</genre>
<genre authority="bibutilsgt">academic journal</genre>
</relatedItem>
<abstract>This position paper’s primary goal is to provoke thoughtful discussion about the relationship between bias and fundamental properties of large language models (LLMs). I do this by seeking to convince the reader that harmful biases are an inevitable consequence arising from the design of any large language model as LLMs are currently formulated. To the extent that this is true, it suggests that the problem of harmful bias cannot be properly addressed without a serious reconsideration of AI driven by LLMs, going back to the foundational assumptions underlying their design.</abstract>
<identifier type="citekey">resnik-2025-large</identifier>
<identifier type="doi">10.1162/coli_a_00558</identifier>
<location>
<url>https://aclanthology.org/2025.cl-3.6/</url>
</location>
<part>
<date>2025-09</date>
<detail type="volume"><number>51</number></detail>
<detail type="issue"><number>3</number></detail>
<extent unit="page">
<start>885</start>
<end>906</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Journal Article
%T Large Language Models Are Biased Because They Are Large Language Models
%A Resnik, Philip
%J Computational Linguistics
%D 2025
%8 September
%V 51
%N 3
%I MIT Press
%C Cambridge, MA
%F resnik-2025-large
%X This position paper’s primary goal is to provoke thoughtful discussion about the relationship between bias and fundamental properties of large language models (LLMs). I do this by seeking to convince the reader that harmful biases are an inevitable consequence arising from the design of any large language model as LLMs are currently formulated. To the extent that this is true, it suggests that the problem of harmful bias cannot be properly addressed without a serious reconsideration of AI driven by LLMs, going back to the foundational assumptions underlying their design.
%R 10.1162/coli_a_00558
%U https://aclanthology.org/2025.cl-3.6/
%U https://doi.org/10.1162/coli_a_00558
%P 885-906
Markdown (Informal)
[Large Language Models Are Biased Because They Are Large Language Models](https://aclanthology.org/2025.cl-3.6/) (Resnik, CL 2025)
ACL