@inproceedings{cong-2022-pre,
title = "Pre-trained Language Models{'} Interpretation of Evaluativity Implicature: Evidence from Gradable Adjectives Usage in Context",
author = "Cong, Yan",
editor = "Pyatkin, Valentina and
Fried, Daniel and
Anthonio, Talita",
booktitle = "Proceedings of the Second Workshop on Understanding Implicit and Underspecified Language",
month = jul,
year = "2022",
address = "Seattle, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.unimplicit-1.1",
doi = "10.18653/v1/2022.unimplicit-1.1",
pages = "1--7",
abstract = "By saying Maria is tall, a human speaker typically implies that Maria is evaluatively tall from the speaker{'}s perspective. However, by using a different construction Maria is taller than Sophie, we cannot infer from Maria and Sophie{'}s relative heights that Maria is evaluatively tall because it is possible for Maria to be taller than Sophie in a context in which they both count as short. Can pre-trained language models (LMs) {``}understand{''} evaluativity (EVAL) inference? To what extent can they discern the EVAL salience of different constructions in a conversation? Will it help LMs{'} implicitness performance if we give LMs a persona such as chill, social, and pragmatically skilled? Our study provides an approach to probing LMs{'} interpretation of EVAL inference by incorporating insights from experimental pragmatics and sociolinguistics. We find that with the appropriate prompt, LMs can succeed in some pragmatic level language understanding tasks. Our study suggests that socio-pragmatics methodology can shed light on the challenging questions in NLP.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="cong-2022-pre">
<titleInfo>
<title>Pre-trained Language Models’ Interpretation of Evaluativity Implicature: Evidence from Gradable Adjectives Usage in Context</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yan</namePart>
<namePart type="family">Cong</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Second Workshop on Understanding Implicit and Underspecified Language</title>
</titleInfo>
<name type="personal">
<namePart type="given">Valentina</namePart>
<namePart type="family">Pyatkin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Daniel</namePart>
<namePart type="family">Fried</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Talita</namePart>
<namePart type="family">Anthonio</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Seattle, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>By saying Maria is tall, a human speaker typically implies that Maria is evaluatively tall from the speaker’s perspective. However, by using a different construction Maria is taller than Sophie, we cannot infer from Maria and Sophie’s relative heights that Maria is evaluatively tall because it is possible for Maria to be taller than Sophie in a context in which they both count as short. Can pre-trained language models (LMs) “understand” evaluativity (EVAL) inference? To what extent can they discern the EVAL salience of different constructions in a conversation? Will it help LMs’ implicitness performance if we give LMs a persona such as chill, social, and pragmatically skilled? Our study provides an approach to probing LMs’ interpretation of EVAL inference by incorporating insights from experimental pragmatics and sociolinguistics. We find that with the appropriate prompt, LMs can succeed in some pragmatic level language understanding tasks. Our study suggests that socio-pragmatics methodology can shed light on the challenging questions in NLP.</abstract>
<identifier type="citekey">cong-2022-pre</identifier>
<identifier type="doi">10.18653/v1/2022.unimplicit-1.1</identifier>
<location>
<url>https://aclanthology.org/2022.unimplicit-1.1</url>
</location>
<part>
<date>2022-07</date>
<extent unit="page">
<start>1</start>
<end>7</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Pre-trained Language Models’ Interpretation of Evaluativity Implicature: Evidence from Gradable Adjectives Usage in Context
%A Cong, Yan
%Y Pyatkin, Valentina
%Y Fried, Daniel
%Y Anthonio, Talita
%S Proceedings of the Second Workshop on Understanding Implicit and Underspecified Language
%D 2022
%8 July
%I Association for Computational Linguistics
%C Seattle, USA
%F cong-2022-pre
%X By saying Maria is tall, a human speaker typically implies that Maria is evaluatively tall from the speaker’s perspective. However, by using a different construction Maria is taller than Sophie, we cannot infer from Maria and Sophie’s relative heights that Maria is evaluatively tall because it is possible for Maria to be taller than Sophie in a context in which they both count as short. Can pre-trained language models (LMs) “understand” evaluativity (EVAL) inference? To what extent can they discern the EVAL salience of different constructions in a conversation? Will it help LMs’ implicitness performance if we give LMs a persona such as chill, social, and pragmatically skilled? Our study provides an approach to probing LMs’ interpretation of EVAL inference by incorporating insights from experimental pragmatics and sociolinguistics. We find that with the appropriate prompt, LMs can succeed in some pragmatic level language understanding tasks. Our study suggests that socio-pragmatics methodology can shed light on the challenging questions in NLP.
%R 10.18653/v1/2022.unimplicit-1.1
%U https://aclanthology.org/2022.unimplicit-1.1
%U https://doi.org/10.18653/v1/2022.unimplicit-1.1
%P 1-7
Markdown (Informal)
[Pre-trained Language Models’ Interpretation of Evaluativity Implicature: Evidence from Gradable Adjectives Usage in Context](https://aclanthology.org/2022.unimplicit-1.1) (Cong, unimplicit 2022)
ACL