@inproceedings{huynh-etal-2023-implicit,
title = "Implicit causality in {GPT}-2: a case study",
author = "Huynh, Minh Hien and
Lentz, Tomas and
van Miltenburg, Emiel",
editor = "Amblard, Maxime and
Breitholtz, Ellen",
booktitle = "Proceedings of the 15th International Conference on Computational Semantics",
month = jun,
year = "2023",
address = "Nancy, France",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.iwcs-1.7",
pages = "67--77",
abstract = "This case study investigates the extent to which a language model (GPT-2) is able to capture native speakers{'} intuitions about implicit causality in a sentence completion task. Study 1 reproduces earlier results (showing that the model{'}s surprisal values correlate with the implicit causality bias of the verb; Davis and van Schijndel 2021), and then examine the effects of gender and verb frequency on model performance. Study 2 examines the reasoning ability of GPT-2: Is the model able to produce more sensible motivations for why the subject VERBed the object if the verbs have stronger causality biases? For this study we took care to avoid human raters being biased by obscenities and disfluencies generated by the model.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="huynh-etal-2023-implicit">
    <titleInfo>
      <title>Implicit causality in GPT-2: a case study</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Minh</namePart>
      <namePart type="given">Hien</namePart>
      <namePart type="family">Huynh</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Tomas</namePart>
      <namePart type="family">Lentz</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Emiel</namePart>
      <namePart type="family">van Miltenburg</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2023-06</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 15th International Conference on Computational Semantics</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Maxime</namePart>
        <namePart type="family">Amblard</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Ellen</namePart>
        <namePart type="family">Breitholtz</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Nancy, France</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>This case study investigates the extent to which a language model (GPT-2) is able to capture native speakers’ intuitions about implicit causality in a sentence completion task. Study 1 reproduces earlier results (showing that the model’s surprisal values correlate with the implicit causality bias of the verb; Davis and van Schijndel 2021), and then examines the effects of gender and verb frequency on model performance. Study 2 examines the reasoning ability of GPT-2: Is the model able to produce more sensible motivations for why the subject VERBed the object if the verbs have stronger causality biases? For this study we took care to avoid human raters being biased by obscenities and disfluencies generated by the model.</abstract>
    <identifier type="citekey">huynh-etal-2023-implicit</identifier>
    <location>
      <url>https://aclanthology.org/2023.iwcs-1.7</url>
    </location>
    <part>
      <date>2023-06</date>
      <extent unit="page">
        <start>67</start>
        <end>77</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Implicit causality in GPT-2: a case study
%A Huynh, Minh Hien
%A Lentz, Tomas
%A van Miltenburg, Emiel
%Y Amblard, Maxime
%Y Breitholtz, Ellen
%S Proceedings of the 15th International Conference on Computational Semantics
%D 2023
%8 June
%I Association for Computational Linguistics
%C Nancy, France
%F huynh-etal-2023-implicit
%X This case study investigates the extent to which a language model (GPT-2) is able to capture native speakers’ intuitions about implicit causality in a sentence completion task. Study 1 reproduces earlier results (showing that the model’s surprisal values correlate with the implicit causality bias of the verb; Davis and van Schijndel 2021), and then examines the effects of gender and verb frequency on model performance. Study 2 examines the reasoning ability of GPT-2: Is the model able to produce more sensible motivations for why the subject VERBed the object if the verbs have stronger causality biases? For this study we took care to avoid human raters being biased by obscenities and disfluencies generated by the model.
%U https://aclanthology.org/2023.iwcs-1.7
%P 67-77
Markdown (Informal)
[Implicit causality in GPT-2: a case study](https://aclanthology.org/2023.iwcs-1.7) (Huynh et al., IWCS 2023)
ACL
Minh Hien Huynh, Tomas Lentz, and Emiel van Miltenburg. 2023. Implicit causality in GPT-2: a case study. In Proceedings of the 15th International Conference on Computational Semantics, pages 67–77, Nancy, France. Association for Computational Linguistics.