@inproceedings{prange-wong-2023-reanalyzing,
  title     = {Reanalyzing {L2} Preposition Learning with {Bayesian} Mixed Effects and a Pretrained Language Model},
  author    = {Prange, Jakob and
               Wong, Man Ho Ivy},
  editor    = {Rogers, Anna and
               Boyd-Graber, Jordan and
               Okazaki, Naoaki},
  booktitle = {Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2023},
  address   = {Toronto, Canada},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.acl-long.712},
  doi       = {10.18653/v1/2023.acl-long.712},
  pages     = {12722--12736},
  abstract  = {We use both Bayesian and neural models to dissect a data set of Chinese learners' pre- and post-interventional responses to two tests measuring their understanding of English prepositions. The results mostly replicate previous findings from frequentist analyses and newly reveal crucial interactions between student ability, task type, and stimulus sentence. Given the sparsity of the data as well as high diversity among learners, the Bayesian method proves most useful; but we also see potential in using language model probabilities as predictors of grammaticality and learnability.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="prange-wong-2023-reanalyzing">
<titleInfo>
<title>Reanalyzing L2 Preposition Learning with Bayesian Mixed Effects and a Pretrained Language Model</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jakob</namePart>
<namePart type="family">Prange</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Man</namePart>
<namePart type="given">Ho</namePart>
<namePart type="given">Ivy</namePart>
<namePart type="family">Wong</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="family">Rogers</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jordan</namePart>
<namePart type="family">Boyd-Graber</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Naoaki</namePart>
<namePart type="family">Okazaki</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Toronto, Canada</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We use both Bayesian and neural models to dissect a data set of Chinese learners’ pre- and post-interventional responses to two tests measuring their understanding of English prepositions. The results mostly replicate previous findings from frequentist analyses and newly reveal crucial interactions between student ability, task type, and stimulus sentence. Given the sparsity of the data as well as high diversity among learners, the Bayesian method proves most useful; but we also see potential in using language model probabilities as predictors of grammaticality and learnability.</abstract>
<identifier type="citekey">prange-wong-2023-reanalyzing</identifier>
<identifier type="doi">10.18653/v1/2023.acl-long.712</identifier>
<location>
<url>https://aclanthology.org/2023.acl-long.712</url>
</location>
<part>
<date>2023-07</date>
<extent unit="page">
<start>12722</start>
<end>12736</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Reanalyzing L2 Preposition Learning with Bayesian Mixed Effects and a Pretrained Language Model
%A Prange, Jakob
%A Wong, Man Ho Ivy
%Y Rogers, Anna
%Y Boyd-Graber, Jordan
%Y Okazaki, Naoaki
%S Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)
%D 2023
%8 July
%I Association for Computational Linguistics
%C Toronto, Canada
%F prange-wong-2023-reanalyzing
%X We use both Bayesian and neural models to dissect a data set of Chinese learners’ pre- and post-interventional responses to two tests measuring their understanding of English prepositions. The results mostly replicate previous findings from frequentist analyses and newly reveal crucial interactions between student ability, task type, and stimulus sentence. Given the sparsity of the data as well as high diversity among learners, the Bayesian method proves most useful; but we also see potential in using language model probabilities as predictors of grammaticality and learnability.
%R 10.18653/v1/2023.acl-long.712
%U https://aclanthology.org/2023.acl-long.712
%U https://doi.org/10.18653/v1/2023.acl-long.712
%P 12722-12736
Markdown (Informal)
[Reanalyzing L2 Preposition Learning with Bayesian Mixed Effects and a Pretrained Language Model](https://aclanthology.org/2023.acl-long.712) (Prange & Wong, ACL 2023)
ACL