@inproceedings{tint-2025-guardrails,
title = "Guardrails, not Guidance: Understanding Responses to {LGBTQ}+ Language in Large Language Models",
author = "Tint, Joshua",
editor = "Pranav, A and
Valentine, Alissa and
Bhatt, Shaily and
Long, Yanan and
Subramonian, Arjun and
Bertsch, Amanda and
Lauscher, Anne and
Gupta, Ankush",
booktitle = "Proceedings of the Queer in AI Workshop",
month = may,
year = "2025",
address = "Hybrid format (in-person and virtual)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.queerinai-main.2/",
doi = "10.18653/v1/2025.queerinai-main.2",
pages = "6--16",
ISBN = "979-8-89176-244-2",
abstract = "Language models have integrated themselves into many aspects of digital life, shaping everything from social media to translation. This paper investigates how large language models (LLMs) respond to LGBTQ+ slang and heteronormative language. Through two experiments, the study assesses the emotional content and the impact of queer slang on responses from models including GPT-3.5, GPT-4o, Llama2, Llama3, Gemma and Mistral. The findings reveal that heteronormative prompts can trigger safety mechanisms, leading to neutral or corrective responses, while LGBTQ+ slang elicits more negative emotions. These insights punctuate the need to provide equitable outcomes for minority slangs and argots, in addition to eliminating explicit bigotry from language models."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="tint-2025-guardrails">
<titleInfo>
<title>Guardrails, not Guidance: Understanding Responses to LGBTQ+ Language in Large Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Joshua</namePart>
<namePart type="family">Tint</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Queer in AI Workshop</title>
</titleInfo>
<name type="personal">
<namePart type="given">A</namePart>
<namePart type="family">Pranav</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alissa</namePart>
<namePart type="family">Valentine</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shaily</namePart>
<namePart type="family">Bhatt</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yanan</namePart>
<namePart type="family">Long</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Arjun</namePart>
<namePart type="family">Subramonian</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Amanda</namePart>
<namePart type="family">Bertsch</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anne</namePart>
<namePart type="family">Lauscher</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ankush</namePart>
<namePart type="family">Gupta</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Hybrid format (in-person and virtual)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-244-2</identifier>
</relatedItem>
<abstract>Language models have integrated themselves into many aspects of digital life, shaping everything from social media to translation. This paper investigates how large language models (LLMs) respond to LGBTQ+ slang and heteronormative language. Through two experiments, the study assesses the emotional content and the impact of queer slang on responses from models including GPT-3.5, GPT-4o, Llama2, Llama3, Gemma and Mistral. The findings reveal that heteronormative prompts can trigger safety mechanisms, leading to neutral or corrective responses, while LGBTQ+ slang elicits more negative emotions. These insights punctuate the need to provide equitable outcomes for minority slangs and argots, in addition to eliminating explicit bigotry from language models.</abstract>
<identifier type="citekey">tint-2025-guardrails</identifier>
<identifier type="doi">10.18653/v1/2025.queerinai-main.2</identifier>
<location>
<url>https://aclanthology.org/2025.queerinai-main.2/</url>
</location>
<part>
<date>2025-05</date>
<extent unit="page">
<start>6</start>
<end>16</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Guardrails, not Guidance: Understanding Responses to LGBTQ+ Language in Large Language Models
%A Tint, Joshua
%Y Pranav, A.
%Y Valentine, Alissa
%Y Bhatt, Shaily
%Y Long, Yanan
%Y Subramonian, Arjun
%Y Bertsch, Amanda
%Y Lauscher, Anne
%Y Gupta, Ankush
%S Proceedings of the Queer in AI Workshop
%D 2025
%8 May
%I Association for Computational Linguistics
%C Hybrid format (in-person and virtual)
%@ 979-8-89176-244-2
%F tint-2025-guardrails
%X Language models have integrated themselves into many aspects of digital life, shaping everything from social media to translation. This paper investigates how large language models (LLMs) respond to LGBTQ+ slang and heteronormative language. Through two experiments, the study assesses the emotional content and the impact of queer slang on responses from models including GPT-3.5, GPT-4o, Llama2, Llama3, Gemma and Mistral. The findings reveal that heteronormative prompts can trigger safety mechanisms, leading to neutral or corrective responses, while LGBTQ+ slang elicits more negative emotions. These insights punctuate the need to provide equitable outcomes for minority slangs and argots, in addition to eliminating explicit bigotry from language models.
%R 10.18653/v1/2025.queerinai-main.2
%U https://aclanthology.org/2025.queerinai-main.2/
%U https://doi.org/10.18653/v1/2025.queerinai-main.2
%P 6-16
Markdown (Informal)
[Guardrails, not Guidance: Understanding Responses to LGBTQ+ Language in Large Language Models](https://aclanthology.org/2025.queerinai-main.2/) (Tint, QueerInAI 2025)
ACL