@inproceedings{muti-2024-pejorativity,
title = "{P}ejorativ{IT}y - In-Context Pejorative Language Disambiguation: A {CALAMITA} Challenge",
author = "Muti, Arianna",
editor = "Dell'Orletta, Felice and
Lenci, Alessandro and
Montemagni, Simonetta and
Sprugnoli, Rachele",
booktitle = "Proceedings of the 10th Italian Conference on Computational Linguistics (CLiC-it 2024)",
month = dec,
year = "2024",
address = "Pisa, Italy",
publisher = "CEUR Workshop Proceedings",
url = "https://aclanthology.org/2024.clicit-1.136/",
pages = "1228--1233",
ISBN = "979-12-210-7060-6",
abstract = "Misogyny is often expressed through figurative language. Some neutral words can assume a negative connotation when functioning as pejorative epithets, and they can be used to express misogyny. Disambiguating the meaning of such terms might help the detection of misogyny. This challenge addresses a) the disambiguation of specific ambiguous words in a given context; b) the detection of misogyny in instances that contain such polysemic words. In particular, framed as a binary classification, our task is divided into two parts. In Task A, the model is asked to define if, given a tweet, the target word is used in pejorative or non-pejorative way. In Task B, the model is asked whether the whole sentence is misogynous or not."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="muti-2024-pejorativity">
<titleInfo>
<title>PejorativITy - In-Context Pejorative Language Disambiguation: A CALAMITA Challenge</title>
</titleInfo>
<name type="personal">
<namePart type="given">Arianna</namePart>
<namePart type="family">Muti</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 10th Italian Conference on Computational Linguistics (CLiC-it 2024)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Felice</namePart>
<namePart type="family">Dell’Orletta</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alessandro</namePart>
<namePart type="family">Lenci</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Simonetta</namePart>
<namePart type="family">Montemagni</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rachele</namePart>
<namePart type="family">Sprugnoli</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>CEUR Workshop Proceedings</publisher>
<place>
<placeTerm type="text">Pisa, Italy</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-12-210-7060-6</identifier>
</relatedItem>
    <abstract>Misogyny is often expressed through figurative language. Some neutral words can assume a negative connotation when functioning as pejorative epithets, and they can be used to express misogyny. Disambiguating the meaning of such terms might help the detection of misogyny. This challenge addresses a) the disambiguation of specific ambiguous words in a given context; b) the detection of misogyny in instances that contain such polysemic words. In particular, our task is framed as binary classification and divided into two parts. In Task A, given a tweet, the model is asked to determine whether the target word is used in a pejorative or non-pejorative way. In Task B, the model is asked to determine whether the whole sentence is misogynous or not.</abstract>
<identifier type="citekey">muti-2024-pejorativity</identifier>
<location>
<url>https://aclanthology.org/2024.clicit-1.136/</url>
</location>
<part>
<date>2024-12</date>
<extent unit="page">
<start>1228</start>
<end>1233</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T PejorativITy - In-Context Pejorative Language Disambiguation: A CALAMITA Challenge
%A Muti, Arianna
%Y Dell’Orletta, Felice
%Y Lenci, Alessandro
%Y Montemagni, Simonetta
%Y Sprugnoli, Rachele
%S Proceedings of the 10th Italian Conference on Computational Linguistics (CLiC-it 2024)
%D 2024
%8 December
%I CEUR Workshop Proceedings
%C Pisa, Italy
%@ 979-12-210-7060-6
%F muti-2024-pejorativity
%X Misogyny is often expressed through figurative language. Some neutral words can assume a negative connotation when functioning as pejorative epithets, and they can be used to express misogyny. Disambiguating the meaning of such terms might help the detection of misogyny. This challenge addresses a) the disambiguation of specific ambiguous words in a given context; b) the detection of misogyny in instances that contain such polysemic words. In particular, our task is framed as binary classification and divided into two parts. In Task A, given a tweet, the model is asked to determine whether the target word is used in a pejorative or non-pejorative way. In Task B, the model is asked to determine whether the whole sentence is misogynous or not.
%U https://aclanthology.org/2024.clicit-1.136/
%P 1228-1233
Markdown (Informal)
[PejorativITy - In-Context Pejorative Language Disambiguation: A CALAMITA Challenge](https://aclanthology.org/2024.clicit-1.136/) (Muti, CLiC-it 2024)
ACL
Arianna Muti. 2024. PejorativITy - In-Context Pejorative Language Disambiguation: A CALAMITA Challenge. In Proceedings of the 10th Italian Conference on Computational Linguistics (CLiC-it 2024), pages 1228–1233, Pisa, Italy. CEUR Workshop Proceedings.
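
For readers of this record, a minimal sketch of the task framing described in the abstract follows: the two binary decisions posed as text-classification prompts. The prompt wording, label strings, and placeholder example are illustrative assumptions, not the challenge's actual templates or data.

# Illustrative sketch (not from the paper): the two binary tasks described
# in the abstract, framed as classification prompts. Prompt wording, label
# strings, and the placeholder tweet are hypothetical assumptions.

def task_a_prompt(tweet: str, target_word: str) -> str:
    # Task A: is the target word used pejoratively in this tweet?
    return (
        f"Tweet: {tweet}\n"
        f"In this tweet, is the word '{target_word}' used in a pejorative "
        f"or non-pejorative way? Answer 'pejorative' or 'non-pejorative'."
    )

def task_b_prompt(tweet: str) -> str:
    # Task B: is the tweet as a whole misogynous?
    return f"Tweet: {tweet}\nIs this tweet misogynous? Answer 'yes' or 'no'."

if __name__ == "__main__":
    example = "..."  # placeholder for a tweet containing an ambiguous target word
    print(task_a_prompt(example, "target"))
    print(task_b_prompt(example))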