@inproceedings{tang-etal-2021-berts,
title = "Are {BERT}s Sensitive to Native Interference in {L}2 Production?",
author = "Tang, Zixin and
Mitra, Prasenjit and
Reitter, David",
editor = "Sedoc, Jo{\~a}o and
Rogers, Anna and
Rumshisky, Anna and
Tafreshi, Shabnam",
booktitle = "Proceedings of the Second Workshop on Insights from Negative Results in NLP",
month = nov,
year = "2021",
address = "Online and Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.insights-1.6",
doi = "10.18653/v1/2021.insights-1.6",
pages = "36--41",
abstract = "With the essays part from The International Corpus Network of Asian Learners of English (ICNALE) and the TOEFL11 corpus, we fine-tuned neural language models based on BERT to predict English learners{'} native languages. Results showed neural models can learn to represent and detect such native language impacts, but multilingually trained models have no advantage in doing so.",
}
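
For programmatic use, the BibTeX record above can be loaded into a plain Python dictionary. A minimal sketch, assuming the third-party bibtexparser package (v1 API) is installed; the file name tang-etal-2021-berts.bib is a hypothetical local copy of the record:

```python
# Minimal sketch: read the BibTeX record into a dict of fields.
# Assumes bibtexparser v1; the .bib file name is hypothetical.
import bibtexparser
from bibtexparser.bparser import BibTexParser

# common_strings=True resolves month abbreviations such as "nov".
parser = BibTexParser(common_strings=True)
with open("tang-etal-2021-berts.bib", encoding="utf-8") as f:
    database = bibtexparser.load(f, parser=parser)

entry = database.entries[0]  # fields become lowercase dict keys
print(entry["ID"])           # tang-etal-2021-berts
print(entry["title"])        # Are {BERT}s Sensitive to Native Interference ...
print(entry["pages"])        # 36--41
```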
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="tang-etal-2021-berts">
<titleInfo>
<title>Are BERTs Sensitive to Native Interference in L2 Production?</title>
</titleInfo>
<name type="personal">
<namePart type="given">Zixin</namePart>
<namePart type="family">Tang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Prasenjit</namePart>
<namePart type="family">Mitra</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">David</namePart>
<namePart type="family">Reitter</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Second Workshop on Insights from Negative Results in NLP</title>
</titleInfo>
<name type="personal">
<namePart type="given">João</namePart>
<namePart type="family">Sedoc</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="family">Rogers</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="family">Rumshisky</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shabnam</namePart>
<namePart type="family">Tafreshi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online and Punta Cana, Dominican Republic</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
    <abstract>Using essays from The International Corpus Network of Asian Learners of English (ICNALE) and the TOEFL11 corpus, we fine-tuned neural language models based on BERT to predict English learners’ native languages. Results showed that neural models can learn to represent and detect such native-language influence, but that multilingually trained models have no advantage in doing so.</abstract>
<identifier type="citekey">tang-etal-2021-berts</identifier>
<identifier type="doi">10.18653/v1/2021.insights-1.6</identifier>
<location>
<url>https://aclanthology.org/2021.insights-1.6</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>36</start>
<end>41</end>
</extent>
</part>
</mods>
</modsCollection>
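
The MODS record is namespace-qualified XML (http://www.loc.gov/mods/v3), so every query has to bind that namespace. A minimal sketch using only the Python standard library; the file name is again a hypothetical local copy:

```python
# Minimal sketch: extract title, authors, and page range from the MODS
# record using the standard library. The file name is hypothetical.
import xml.etree.ElementTree as ET

NS = {"m": "http://www.loc.gov/mods/v3"}  # MODS v3 namespace

root = ET.parse("tang-etal-2021-berts.xml").getroot()
mods = root.find("m:mods", NS)

title = mods.findtext("m:titleInfo/m:title", namespaces=NS)
# Only direct <name> children are authors; editors sit inside <relatedItem>.
authors = [
    " ".join(part.text for part in name.findall("m:namePart", NS))
    for name in mods.findall("m:name", NS)
    if name.findtext("m:role/m:roleTerm", namespaces=NS) == "author"
]
start = mods.findtext("m:part/m:extent/m:start", namespaces=NS)
end = mods.findtext("m:part/m:extent/m:end", namespaces=NS)

print(title)                    # Are BERTs Sensitive to Native Interference ...
print(authors)                  # ['Zixin Tang', 'Prasenjit Mitra', 'David Reitter']
print(f"pages {start}-{end}")   # pages 36-41
```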
%0 Conference Proceedings
%T Are BERTs Sensitive to Native Interference in L2 Production?
%A Tang, Zixin
%A Mitra, Prasenjit
%A Reitter, David
%Y Sedoc, João
%Y Rogers, Anna
%Y Rumshisky, Anna
%Y Tafreshi, Shabnam
%S Proceedings of the Second Workshop on Insights from Negative Results in NLP
%D 2021
%8 November
%I Association for Computational Linguistics
%C Online and Punta Cana, Dominican Republic
%F tang-etal-2021-berts
%X Using essays from The International Corpus Network of Asian Learners of English (ICNALE) and the TOEFL11 corpus, we fine-tuned neural language models based on BERT to predict English learners’ native languages. Results showed that neural models can learn to represent and detect such native-language influence, but that multilingually trained models have no advantage in doing so.
%R 10.18653/v1/2021.insights-1.6
%U https://aclanthology.org/2021.insights-1.6
%U https://doi.org/10.18653/v1/2021.insights-1.6
%P 36-41
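
The Endnote/refer record above is line-oriented: each line carries a percent-code tag, a space, and a value, and tags such as %A and %U may repeat. A minimal stdlib-only parsing sketch, with the record abbreviated inline:

```python
# Minimal sketch: parse a refer/Endnote record into tag -> list of values.
from collections import defaultdict

def parse_refer(record: str) -> dict[str, list[str]]:
    fields: dict[str, list[str]] = defaultdict(list)
    for line in record.splitlines():
        # Each field line looks like "%T Some value"; repeatable tags
        # (e.g. %A, %U) accumulate in document order.
        if line.startswith("%") and " " in line:
            tag, _, value = line.partition(" ")
            fields[tag].append(value.strip())
    return dict(fields)

record = """%0 Conference Proceedings
%T Are BERTs Sensitive to Native Interference in L2 Production?
%A Tang, Zixin
%A Mitra, Prasenjit
%A Reitter, David
%D 2021
%P 36-41"""

fields = parse_refer(record)
print(fields["%T"][0])  # the title
print(fields["%A"])     # all three authors, in order
```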
Markdown (Informal)
[Are BERTs Sensitive to Native Interference in L2 Production?](https://aclanthology.org/2021.insights-1.6) (Tang et al., insights 2021)
ACL
Zixin Tang, Prasenjit Mitra, and David Reitter. 2021. Are BERTs Sensitive to Native Interference in L2 Production?. In Proceedings of the Second Workshop on Insights from Negative Results in NLP, pages 36–41, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.