@inproceedings{khanehzar-etal-2023-probing,
title = "Probing Power by Prompting: Harnessing Pre-trained Language Models for Power Connotation Framing",
author = "Khanehzar, Shima and
Cohn, Trevor and
Mikolajczak, Gosia and
Frermann, Lea",
booktitle = "Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics",
month = may,
year = "2023",
address = "Dubrovnik, Croatia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.eacl-main.61",
pages = "873--885",
abstract = "When describing actions, subtle changes in word choice can evoke very different associations with the involved entities. For instance, a company {`}\textit{employing} workers{'} evokes a more positive connotation than the one {`}\textit{exploiting}{'} them. This concept is called \textit{connotation}. This paper investigates whether pre-trained language models (PLMs) encode such subtle connotative information about \textit{power differentials} between involved entities. We design a probing framework for power connotation, building on (CITATION){'}s operationalization of \textit{connotation frames}. We show that zero-shot prompting of PLMs leads to above chance prediction of power connotation, however fine-tuning PLMs using our framework drastically improves their accuracy. Using our fine-tuned models, we present a case study of \textit{power dynamics} in US news reporting on immigration, showing the potential of our framework as a tool for understanding subtle bias in the media.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="khanehzar-etal-2023-probing">
<titleInfo>
<title>Probing Power by Prompting: Harnessing Pre-trained Language Models for Power Connotation Framing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Shima</namePart>
<namePart type="family">Khanehzar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Trevor</namePart>
<namePart type="family">Cohn</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Gosia</namePart>
<namePart type="family">Mikolajczak</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lea</namePart>
<namePart type="family">Frermann</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dubrovnik, Croatia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>When describing actions, subtle changes in word choice can evoke very different associations with the involved entities. For instance, a company ‘employing workers’ evokes a more positive connotation than one ‘exploiting’ them. This concept is called connotation. This paper investigates whether pre-trained language models (PLMs) encode such subtle connotative information about power differentials between involved entities. We design a probing framework for power connotation, building on (CITATION)’s operationalization of connotation frames. We show that zero-shot prompting of PLMs leads to above-chance prediction of power connotation; however, fine-tuning PLMs using our framework drastically improves their accuracy. Using our fine-tuned models, we present a case study of power dynamics in US news reporting on immigration, showing the potential of our framework as a tool for understanding subtle bias in the media.</abstract>
<identifier type="citekey">khanehzar-etal-2023-probing</identifier>
<location>
<url>https://aclanthology.org/2023.eacl-main.61</url>
</location>
<part>
<date>2023-05</date>
<extent unit="page">
<start>873</start>
<end>885</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Probing Power by Prompting: Harnessing Pre-trained Language Models for Power Connotation Framing
%A Khanehzar, Shima
%A Cohn, Trevor
%A Mikolajczak, Gosia
%A Frermann, Lea
%S Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics
%D 2023
%8 May
%I Association for Computational Linguistics
%C Dubrovnik, Croatia
%F khanehzar-etal-2023-probing
%X When describing actions, subtle changes in word choice can evoke very different associations with the involved entities. For instance, a company ‘employing workers’ evokes a more positive connotation than one ‘exploiting’ them. This concept is called connotation. This paper investigates whether pre-trained language models (PLMs) encode such subtle connotative information about power differentials between involved entities. We design a probing framework for power connotation, building on (CITATION)’s operationalization of connotation frames. We show that zero-shot prompting of PLMs leads to above-chance prediction of power connotation; however, fine-tuning PLMs using our framework drastically improves their accuracy. Using our fine-tuned models, we present a case study of power dynamics in US news reporting on immigration, showing the potential of our framework as a tool for understanding subtle bias in the media.
%U https://aclanthology.org/2023.eacl-main.61
%P 873-885
Markdown (Informal)
[Probing Power by Prompting: Harnessing Pre-trained Language Models for Power Connotation Framing](https://aclanthology.org/2023.eacl-main.61) (Khanehzar et al., EACL 2023)
ACL
Shima Khanehzar, Trevor Cohn, Gosia Mikolajczak, and Lea Frermann. 2023. Probing Power by Prompting: Harnessing Pre-trained Language Models for Power Connotation Framing. In Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics, pages 873–885, Dubrovnik, Croatia. Association for Computational Linguistics.