BibTeX
@inproceedings{ajjour-wachsmuth-2025-exploring,
title = "Exploring {LLM} Priming Strategies for Few-Shot Stance Classification",
author = "Ajjour, Yamen and
Wachsmuth, Henning",
editor = "Chistova, Elena and
Cimiano, Philipp and
Haddadan, Shohreh and
Lapesa, Gabriella and
Ruiz-Dolz, Ramon",
booktitle = "Proceedings of the 12th Argument mining Workshop",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.argmining-1.2/",
doi = "10.18653/v1/2025.argmining-1.2",
pages = "11--23",
ISBN = "979-8-89176-258-9",
abstract = "Large language models (LLMs) are effective in predicting the labels of unseen target instances if instructed for the task and training instances via the prompt. LLMs generate a text with higher probability if the prompt contains text with similar characteristics, a phenomenon, called priming, that especially affects argumentation. An open question in NLP is how to systematically exploit priming to choose a set of instances suitable for a given task. For stance classification, LLMs may be primed with few-shot instances prior to identifying whether a given argument is pro or con a topic. In this paper, we explore two priming strategies for few-shot stance classification: one takes those instances that are most semantically similar, and the other chooses those that are most stance-similar. Experiments on three common stance datasets suggest that priming an LLM with stance-similar instances is particularly effective in few-shot stance classification compared to baseline strategies, and behaves largely consistently across different LLM variants."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ajjour-wachsmuth-2025-exploring">
<titleInfo>
<title>Exploring LLM Priming Strategies for Few-Shot Stance Classification</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yamen</namePart>
<namePart type="family">Ajjour</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Henning</namePart>
<namePart type="family">Wachsmuth</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 12th Argument Mining Workshop</title>
</titleInfo>
<name type="personal">
<namePart type="given">Elena</namePart>
<namePart type="family">Chistova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Philipp</namePart>
<namePart type="family">Cimiano</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shohreh</namePart>
<namePart type="family">Haddadan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Gabriella</namePart>
<namePart type="family">Lapesa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ramon</namePart>
<namePart type="family">Ruiz-Dolz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-258-9</identifier>
</relatedItem>
<abstract>Large language models (LLMs) are effective at predicting the labels of unseen target instances when instructed about the task and given training instances via the prompt. LLMs generate text with higher probability if the prompt contains text with similar characteristics, a phenomenon called priming, which especially affects argumentation. An open question in NLP is how to systematically exploit priming to choose a set of instances suitable for a given task. For stance classification, LLMs may be primed with few-shot instances before identifying whether a given argument is pro or con a topic. In this paper, we explore two priming strategies for few-shot stance classification: one selects the instances that are most semantically similar to the target, and the other those that are most stance-similar. Experiments on three common stance datasets suggest that priming an LLM with stance-similar instances is particularly effective for few-shot stance classification compared to baseline strategies, and behaves largely consistently across different LLM variants.</abstract>
<identifier type="citekey">ajjour-wachsmuth-2025-exploring</identifier>
<identifier type="doi">10.18653/v1/2025.argmining-1.2</identifier>
<location>
<url>https://aclanthology.org/2025.argmining-1.2/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>11</start>
<end>23</end>
</extent>
</part>
</mods>
</modsCollection>

Endnote
%0 Conference Proceedings
%T Exploring LLM Priming Strategies for Few-Shot Stance Classification
%A Ajjour, Yamen
%A Wachsmuth, Henning
%Y Chistova, Elena
%Y Cimiano, Philipp
%Y Haddadan, Shohreh
%Y Lapesa, Gabriella
%Y Ruiz-Dolz, Ramon
%S Proceedings of the 12th Argument Mining Workshop
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-258-9
%F ajjour-wachsmuth-2025-exploring
%X Large language models (LLMs) are effective at predicting the labels of unseen target instances when instructed about the task and given training instances via the prompt. LLMs generate text with higher probability if the prompt contains text with similar characteristics, a phenomenon called priming, which especially affects argumentation. An open question in NLP is how to systematically exploit priming to choose a set of instances suitable for a given task. For stance classification, LLMs may be primed with few-shot instances before identifying whether a given argument is pro or con a topic. In this paper, we explore two priming strategies for few-shot stance classification: one selects the instances that are most semantically similar to the target, and the other those that are most stance-similar. Experiments on three common stance datasets suggest that priming an LLM with stance-similar instances is particularly effective for few-shot stance classification compared to baseline strategies, and behaves largely consistently across different LLM variants.
%R 10.18653/v1/2025.argmining-1.2
%U https://aclanthology.org/2025.argmining-1.2/
%U https://doi.org/10.18653/v1/2025.argmining-1.2
%P 11-23
Markdown (Informal)
[Exploring LLM Priming Strategies for Few-Shot Stance Classification](https://aclanthology.org/2025.argmining-1.2/) (Ajjour & Wachsmuth, ArgMining 2025)
ACL
Yamen Ajjour and Henning Wachsmuth. 2025. Exploring LLM Priming Strategies for Few-Shot Stance Classification. In Proceedings of the 12th Argument Mining Workshop, pages 11–23, Vienna, Austria. Association for Computational Linguistics.
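
For readers skimming the abstract, the following is a minimal sketch of the semantic-similarity priming strategy it describes: select the labeled instances closest to the target argument and place them in the prompt as few-shot examples. This is illustrative only, not the authors' implementation; the embedding model (sentence-transformers' all-MiniLM-L6-v2), the toy instance pool, and the helper `select_few_shots` are all assumptions made for this sketch.

```python
# Minimal, illustrative sketch of similarity-based few-shot selection for
# stance classification (NOT the paper's implementation). Assumes the
# sentence-transformers package; model choice and toy pool are hypothetical.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("all-MiniLM-L6-v2")

# Labeled pool of (argument, stance) training instances (toy examples).
pool = [
    ("School uniforms reduce peer pressure.", "pro"),
    ("Uniforms suppress students' self-expression.", "con"),
    ("Nuclear power is a low-carbon energy source.", "pro"),
    ("Nuclear waste stays dangerous for millennia.", "con"),
]

def select_few_shots(target: str, k: int = 2) -> list[tuple[str, str]]:
    """Return the k pool instances most semantically similar to the target."""
    target_emb = model.encode(target, convert_to_tensor=True)
    pool_embs = model.encode([arg for arg, _ in pool], convert_to_tensor=True)
    scores = util.cos_sim(target_emb, pool_embs)[0]        # cosine similarities
    top = scores.topk(min(k, len(pool))).indices.tolist()  # indices of best matches
    return [pool[i] for i in top]

# Build a few-shot prompt that primes the LLM before the target argument.
target = "Reactors emit far less CO2 than coal plants."
lines = [f"Argument: {a}\nStance: {s}" for a, s in select_few_shots(target)]
lines.append(f"Argument: {target}\nStance:")
print("\n".join(lines))
```

The paper's second strategy instead chooses the most stance-similar instances; per the abstract, that means ranking candidates by stance agreement rather than by embedding distance alone, and the paper itself should be consulted for the exact criterion.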