BibTeX
@inproceedings{kodner-gupta-2020-overestimation,
    title = "Overestimation of Syntactic Representation in Neural Language Models",
    author = "Kodner, Jordan and
      Gupta, Nitish",
    editor = "Jurafsky, Dan and
      Chai, Joyce and
      Schluter, Natalie and
      Tetreault, Joel",
    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.acl-main.160",
    doi = "10.18653/v1/2020.acl-main.160",
    pages = "1757--1762",
    abstract = "With the advent of powerful neural language models over the last few years, research attention has increasingly focused on what aspects of language they represent that make them so successful. Several testing methodologies have been developed to probe models{'} syntactic representations. One popular method for determining a model{'}s ability to induce syntactic structure trains a model on strings generated according to a template then tests the model{'}s ability to distinguish such strings from superficially similar ones with different syntax. We illustrate a fundamental problem with this approach by reproducing positive results from a recent paper with two non-syntactic baseline language models: an n-gram model and an LSTM model trained on scrambled inputs.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kodner-gupta-2020-overestimation">
    <titleInfo>
        <title>Overestimation of Syntactic Representation in Neural Language Models</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Jordan</namePart>
        <namePart type="family">Kodner</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Nitish</namePart>
        <namePart type="family">Gupta</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2020-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Dan</namePart>
            <namePart type="family">Jurafsky</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Joyce</namePart>
            <namePart type="family">Chai</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Natalie</namePart>
            <namePart type="family">Schluter</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Joel</namePart>
            <namePart type="family">Tetreault</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Online</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>With the advent of powerful neural language models over the last few years, research attention has increasingly focused on what aspects of language they represent that make them so successful. Several testing methodologies have been developed to probe models’ syntactic representations. One popular method for determining a model’s ability to induce syntactic structure trains a model on strings generated according to a template then tests the model’s ability to distinguish such strings from superficially similar ones with different syntax. We illustrate a fundamental problem with this approach by reproducing positive results from a recent paper with two non-syntactic baseline language models: an n-gram model and an LSTM model trained on scrambled inputs.</abstract>
    <identifier type="citekey">kodner-gupta-2020-overestimation</identifier>
    <identifier type="doi">10.18653/v1/2020.acl-main.160</identifier>
    <location>
        <url>https://aclanthology.org/2020.acl-main.160</url>
    </location>
    <part>
        <date>2020-07</date>
        <extent unit="page">
            <start>1757</start>
            <end>1762</end>
        </extent>
    </part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Overestimation of Syntactic Representation in Neural Language Models
%A Kodner, Jordan
%A Gupta, Nitish
%Y Jurafsky, Dan
%Y Chai, Joyce
%Y Schluter, Natalie
%Y Tetreault, Joel
%S Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics
%D 2020
%8 July
%I Association for Computational Linguistics
%C Online
%F kodner-gupta-2020-overestimation
%X With the advent of powerful neural language models over the last few years, research attention has increasingly focused on what aspects of language they represent that make them so successful. Several testing methodologies have been developed to probe models’ syntactic representations. One popular method for determining a model’s ability to induce syntactic structure trains a model on strings generated according to a template then tests the model’s ability to distinguish such strings from superficially similar ones with different syntax. We illustrate a fundamental problem with this approach by reproducing positive results from a recent paper with two non-syntactic baseline language models: an n-gram model and an LSTM model trained on scrambled inputs.
%R 10.18653/v1/2020.acl-main.160
%U https://aclanthology.org/2020.acl-main.160
%U https://doi.org/10.18653/v1/2020.acl-main.160
%P 1757-1762
Markdown (Informal)
[Overestimation of Syntactic Representation in Neural Language Models](https://aclanthology.org/2020.acl-main.160) (Kodner & Gupta, ACL 2020)
ACL
Jordan Kodner and Nitish Gupta. 2020. Overestimation of Syntactic Representation in Neural Language Models. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 1757–1762, Online. Association for Computational Linguistics.
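
The abstract's central claim, that a purely lexical baseline with no syntactic representation can still pass template-based syntax probes, can be illustrated with a minimal sketch. Everything below (the toy templates, vocabulary, and bigram scoring) is invented for illustration and is not the authors' actual stimuli, models, or code: a bigram model trained only on template-generated strings will prefer them over reordered foils simply because the two sets differ in surface n-gram statistics.

```python
# Hypothetical sketch of the paper's baseline argument: a bigram model,
# which encodes no syntax, separates template strings from reordered foils
# using surface co-occurrence statistics alone. Toy data, not the authors'.
import math
import random
from collections import Counter

random.seed(0)

NOUNS = ["dog", "cat", "bird"]
VERBS = ["sees", "chases", "hears"]

def template_sentence():
    # "Grammatical" pattern: NOUN VERB NOUN (stand-in for a probe template).
    return [random.choice(NOUNS), random.choice(VERBS), random.choice(NOUNS)]

def foil_sentence():
    # Superficially similar but differently ordered: NOUN NOUN VERB.
    return [random.choice(NOUNS), random.choice(NOUNS), random.choice(VERBS)]

# Train a bigram model on template strings only.
unigrams, bigrams = Counter(), Counter()
for _ in range(2000):
    toks = ["<s>"] + template_sentence() + ["</s>"]
    unigrams.update(toks[:-1])
    bigrams.update(zip(toks[:-1], toks[1:]))

V = len(unigrams) + 1  # vocabulary size for add-one smoothing

def logprob(sent):
    # Add-one-smoothed bigram log-probability of a sentence.
    toks = ["<s>"] + sent + ["</s>"]
    return sum(
        math.log((bigrams[(a, b)] + 1) / (unigrams[a] + V))
        for a, b in zip(toks[:-1], toks[1:])
    )

# Despite having no syntactic representation, the bigram model reliably
# assigns higher probability to the template string than to the foil.
wins = sum(
    logprob(template_sentence()) > logprob(foil_sentence()) for _ in range(500)
)
print(f"bigram baseline prefers the template string in {wins}/500 trials")
```

Under these toy assumptions the baseline wins essentially every trial, which mirrors the paper's point: discriminating template strings from foils does not by itself demonstrate induced syntactic structure.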