BibTeX
@inproceedings{qiu-etal-2023-chatgpt,
    title = "Does {C}hat{GPT} Resemble Humans in Processing Implicatures?",
    author = "Qiu, Zhuang  and
      Duan, Xufeng  and
      Cai, Zhenguang",
    editor = "Chatzikyriakidis, Stergios  and
      de Paiva, Valeria",
    booktitle = "Proceedings of the 4th Natural Logic Meets Machine Learning Workshop",
    month = jun,
    year = "2023",
    address = "Nancy, France",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.naloma-1.3",
    pages = "25--34",
    abstract = "Recent advances in large language models (LLMs) and LLM-driven chatbots, such as ChatGPT, have sparked interest in the extent to which these artificial systems possess human-like linguistic abilities. In this study, we assessed ChatGPT{'}s pragmatic capabilities by conducting three preregistered experiments focused on its ability to compute pragmatic implicatures. The first experiment tested whether ChatGPT inhibits the computation of generalized conversational implicatures (GCIs) when explicitly required to process the text{'}s truth-conditional meaning. The second and third experiments examined whether the communicative context affects ChatGPT{'}s ability to compute scalar implicatures (SIs). Our results showed that ChatGPT did not demonstrate human-like flexibility in switching between pragmatic and semantic processing. Additionally, ChatGPT{'}s judgments did not exhibit the well-established effect of communicative context on SI rates.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="qiu-etal-2023-chatgpt">
    <titleInfo>
      <title>Does ChatGPT Resemble Humans in Processing Implicatures?</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Zhuang</namePart>
      <namePart type="family">Qiu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Xufeng</namePart>
      <namePart type="family">Duan</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Zhenguang</namePart>
      <namePart type="family">Cai</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2023-06</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 4th Natural Logic Meets Machine Learning Workshop</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Stergios</namePart>
        <namePart type="family">Chatzikyriakidis</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Valeria</namePart>
        <namePart type="family">de Paiva</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Nancy, France</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Recent advances in large language models (LLMs) and LLM-driven chatbots, such as ChatGPT, have sparked interest in the extent to which these artificial systems possess human-like linguistic abilities. In this study, we assessed ChatGPT’s pragmatic capabilities by conducting three preregistered experiments focused on its ability to compute pragmatic implicatures. The first experiment tested whether ChatGPT inhibits the computation of generalized conversational implicatures (GCIs) when explicitly required to process the text’s truth-conditional meaning. The second and third experiments examined whether the communicative context affects ChatGPT’s ability to compute scalar implicatures (SIs). Our results showed that ChatGPT did not demonstrate human-like flexibility in switching between pragmatic and semantic processing. Additionally, ChatGPT’s judgments did not exhibit the well-established effect of communicative context on SI rates.</abstract>
    <identifier type="citekey">qiu-etal-2023-chatgpt</identifier>
    <location>
      <url>https://aclanthology.org/2023.naloma-1.3</url>
    </location>
    <part>
      <date>2023-06</date>
      <extent unit="page">
        <start>25</start>
        <end>34</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Does ChatGPT Resemble Humans in Processing Implicatures?
%A Qiu, Zhuang
%A Duan, Xufeng
%A Cai, Zhenguang
%Y Chatzikyriakidis, Stergios
%Y de Paiva, Valeria
%S Proceedings of the 4th Natural Logic Meets Machine Learning Workshop
%D 2023
%8 June
%I Association for Computational Linguistics
%C Nancy, France
%F qiu-etal-2023-chatgpt
%X Recent advances in large language models (LLMs) and LLM-driven chatbots, such as ChatGPT, have sparked interest in the extent to which these artificial systems possess human-like linguistic abilities. In this study, we assessed ChatGPT’s pragmatic capabilities by conducting three preregistered experiments focused on its ability to compute pragmatic implicatures. The first experiment tested whether ChatGPT inhibits the computation of generalized conversational implicatures (GCIs) when explicitly required to process the text’s truth-conditional meaning. The second and third experiments examined whether the communicative context affects ChatGPT’s ability to compute scalar implicatures (SIs). Our results showed that ChatGPT did not demonstrate human-like flexibility in switching between pragmatic and semantic processing. Additionally, ChatGPT’s judgments did not exhibit the well-established effect of communicative context on SI rates.
%U https://aclanthology.org/2023.naloma-1.3
%P 25-34
Markdown (Informal)
[Does ChatGPT Resemble Humans in Processing Implicatures?](https://aclanthology.org/2023.naloma-1.3) (Qiu et al., NALOMA-WS 2023)
ACL
Zhuang Qiu, Xufeng Duan, and Zhenguang Cai. 2023. Does ChatGPT Resemble Humans in Processing Implicatures?. In Proceedings of the 4th Natural Logic Meets Machine Learning Workshop, pages 25–34, Nancy, France. Association for Computational Linguistics.