@inproceedings{liebeskind-lewandowska-tomaszczyk-2024-navigating,
title = "Navigating Opinion Space: A Study of Explicit and Implicit Opinion Generation in Language Models",
author = "Liebeskind, Chaya and
Lewandowska-Tomaszczyk, Barbara",
editor = "Sousa-Silva, Rui and
Cardoso, Henrique Lopes and
Koponen, Maarit and
Lora, Antonio Pareja and
Seresi, M{\'a}rta",
booktitle = "Proceedings of the First LUHME Workshop",
month = oct,
year = "2024",
address = "Santiago de Compostela, Spain",
publisher = "CLUP, Centro de Lingu{\'i}stica da Universidade do Porto FLUP - Faculdade de Letras da Universidade do Porto",
url = "https://aclanthology.org/2024.luhme-1.4/",
pages = "28--34",
abstract = "The paper focuses on testing the use of conversational Large Language Models (LLMs), in particular chatGPT and Google models, instructed to assume the role of linguistics experts to produce opinionated texts, which are defined as subjective statements about animates, things, events or properties, in contrast to knowledge/evidence-based objective factual statements. The taxonomy differentiates between Explicit (Direct or Indirect), and Implicit opinionated texts, further distinguishing between positive and negative, ambiguous, or balanced opinions. Examples of opinionated texts and instances of explicit opinion-marking discourse markers (words and phrases) we identified, as well as instances of opinion-marking mental verbs, evaluative and emotion phraseology, and expressive lexis, were provided in a series of prompts. The model demonstrated accurate identification of Direct and Indirect Explicit opinionated utterances, successfully classifying them according to language-specific properties, while less effective performance was observed for prompts requesting illustrations for Implicitly opinionated texts.To tackle this obstacle, the Chain-of-Thoughts methodology was used. Requested to convert the erroneously recognized opinion instances into factual knowledge sentences, LLMs effectively transformed texts containing explicit markers of opinion. However, the ability to transform Explicit Indirect, and Implicit opinionated texts into factual statements is lacking. This finding is interesting as, while the LLM is supposed to give a linguistic statement with factual information, it might be unaware of implicit opinionated content. Our experiment with the LLMs presents novel prospects for the field of linguistics."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="liebeskind-lewandowska-tomaszczyk-2024-navigating">
<titleInfo>
<title>Navigating Opinion Space: A Study of Explicit and Implicit Opinion Generation in Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Chaya</namePart>
<namePart type="family">Liebeskind</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Barbara</namePart>
<namePart type="family">Lewandowska-Tomaszczyk</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-10</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First LUHME Workshop</title>
</titleInfo>
<name type="personal">
<namePart type="given">Rui</namePart>
<namePart type="family">Sousa-Silva</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Henrique</namePart>
<namePart type="given">Lopes</namePart>
<namePart type="family">Cardoso</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Maarit</namePart>
<namePart type="family">Koponen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Antonio</namePart>
<namePart type="given">Pareja</namePart>
<namePart type="family">Lora</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Márta</namePart>
<namePart type="family">Seresi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>CLUP, Centro de Linguística da Universidade do Porto FLUP - Faculdade de Letras da Universidade do Porto</publisher>
<place>
<placeTerm type="text">Santiago de Compostela, Spain</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
    <abstract>The paper focuses on testing the use of conversational Large Language Models (LLMs), in particular ChatGPT and Google models, instructed to assume the role of linguistics experts to produce opinionated texts, which are defined as subjective statements about animates, things, events or properties, in contrast to knowledge/evidence-based objective factual statements. The taxonomy differentiates between Explicit (Direct or Indirect) and Implicit opinionated texts, further distinguishing between positive, negative, ambiguous, or balanced opinions. Examples of opinionated texts and instances of explicit opinion-marking discourse markers (words and phrases) that we identified, as well as instances of opinion-marking mental verbs, evaluative and emotion phraseology, and expressive lexis, were provided in a series of prompts. The model demonstrated accurate identification of Direct and Indirect Explicit opinionated utterances, successfully classifying them according to language-specific properties, while less effective performance was observed for prompts requesting illustrations of Implicitly opinionated texts. To tackle this obstacle, the Chain-of-Thought methodology was used. Requested to convert the erroneously recognized opinion instances into factual knowledge sentences, the LLMs effectively transformed texts containing explicit markers of opinion. However, the ability to transform Explicit Indirect and Implicit opinionated texts into factual statements is lacking. This finding is interesting as, while the LLM is supposed to give a linguistic statement with factual information, it might be unaware of implicit opinionated content. Our experiment with the LLMs presents novel prospects for the field of linguistics.</abstract>
<identifier type="citekey">liebeskind-lewandowska-tomaszczyk-2024-navigating</identifier>
<location>
<url>https://aclanthology.org/2024.luhme-1.4/</url>
</location>
<part>
<date>2024-10</date>
<extent unit="page">
<start>28</start>
<end>34</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Navigating Opinion Space: A Study of Explicit and Implicit Opinion Generation in Language Models
%A Liebeskind, Chaya
%A Lewandowska-Tomaszczyk, Barbara
%Y Sousa-Silva, Rui
%Y Cardoso, Henrique Lopes
%Y Koponen, Maarit
%Y Lora, Antonio Pareja
%Y Seresi, Márta
%S Proceedings of the First LUHME Workshop
%D 2024
%8 October
%I CLUP, Centro de Linguística da Universidade do Porto FLUP - Faculdade de Letras da Universidade do Porto
%C Santiago de Compostela, Spain
%F liebeskind-lewandowska-tomaszczyk-2024-navigating
%X The paper focuses on testing the use of conversational Large Language Models (LLMs), in particular ChatGPT and Google models, instructed to assume the role of linguistics experts to produce opinionated texts, which are defined as subjective statements about animates, things, events or properties, in contrast to knowledge/evidence-based objective factual statements. The taxonomy differentiates between Explicit (Direct or Indirect) and Implicit opinionated texts, further distinguishing between positive, negative, ambiguous, or balanced opinions. Examples of opinionated texts and instances of explicit opinion-marking discourse markers (words and phrases) that we identified, as well as instances of opinion-marking mental verbs, evaluative and emotion phraseology, and expressive lexis, were provided in a series of prompts. The model demonstrated accurate identification of Direct and Indirect Explicit opinionated utterances, successfully classifying them according to language-specific properties, while less effective performance was observed for prompts requesting illustrations of Implicitly opinionated texts. To tackle this obstacle, the Chain-of-Thought methodology was used. Requested to convert the erroneously recognized opinion instances into factual knowledge sentences, the LLMs effectively transformed texts containing explicit markers of opinion. However, the ability to transform Explicit Indirect and Implicit opinionated texts into factual statements is lacking. This finding is interesting as, while the LLM is supposed to give a linguistic statement with factual information, it might be unaware of implicit opinionated content. Our experiment with the LLMs presents novel prospects for the field of linguistics.
%U https://aclanthology.org/2024.luhme-1.4/
%P 28-34
Markdown (Informal)
[Navigating Opinion Space: A Study of Explicit and Implicit Opinion Generation in Language Models](https://aclanthology.org/2024.luhme-1.4/) (Liebeskind & Lewandowska-Tomaszczyk, LUHME 2024)
ACL
Chaya Liebeskind and Barbara Lewandowska-Tomaszczyk. 2024. Navigating Opinion Space: A Study of Explicit and Implicit Opinion Generation in Language Models. In Proceedings of the First LUHME Workshop, pages 28–34, Santiago de Compostela, Spain. CLUP, Centro de Linguística da Universidade do Porto FLUP - Faculdade de Letras da Universidade do Porto.