@inproceedings{ri-tsuruoka-2022-pretraining,
title = "Pretraining with Artificial Language: Studying Transferable Knowledge in Language Models",
author = "Ri, Ryokan and
Tsuruoka, Yoshimasa",
editor = "Muresan, Smaranda and
Nakov, Preslav and
Villavicencio, Aline",
booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.acl-long.504/",
doi = "10.18653/v1/2022.acl-long.504",
pages = "7302--7315",
abstract = "We investigate what kind of structural knowledge learned in neural network encoders is transferable to processing natural language. We design \textit{artificial languages} with structural properties that mimic natural language, pretrain encoders on the data, and see how much performance the encoder exhibits on downstream tasks in natural language.Our experimental results show that pretraining with an artificial language with a nesting dependency structure provides some knowledge transferable to natural language.A follow-up probing analysis indicates that its success in the transfer is related to the amount of encoded contextual information and what is transferred is the knowledge of \textit{position-aware context dependence} of language.Our results provide insights into how neural network encoders process human languages and the source of cross-lingual transferability of recent multilingual language models."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ri-tsuruoka-2022-pretraining">
<titleInfo>
<title>Pretraining with Artificial Language: Studying Transferable Knowledge in Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ryokan</namePart>
<namePart type="family">Ri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yoshimasa</namePart>
<namePart type="family">Tsuruoka</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Smaranda</namePart>
<namePart type="family">Muresan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Preslav</namePart>
<namePart type="family">Nakov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aline</namePart>
<namePart type="family">Villavicencio</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dublin, Ireland</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We investigate what kind of structural knowledge learned in neural network encoders is transferable to processing natural language. We design artificial languages with structural properties that mimic natural language, pretrain encoders on the data, and see how much performance the encoder exhibits on downstream tasks in natural language. Our experimental results show that pretraining with an artificial language with a nesting dependency structure provides some knowledge transferable to natural language. A follow-up probing analysis indicates that its success in the transfer is related to the amount of encoded contextual information and what is transferred is the knowledge of position-aware context dependence of language. Our results provide insights into how neural network encoders process human languages and the source of cross-lingual transferability of recent multilingual language models.</abstract>
<identifier type="citekey">ri-tsuruoka-2022-pretraining</identifier>
<identifier type="doi">10.18653/v1/2022.acl-long.504</identifier>
<location>
<url>https://aclanthology.org/2022.acl-long.504/</url>
</location>
<part>
<date>2022-05</date>
<extent unit="page">
<start>7302</start>
<end>7315</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Pretraining with Artificial Language: Studying Transferable Knowledge in Language Models
%A Ri, Ryokan
%A Tsuruoka, Yoshimasa
%Y Muresan, Smaranda
%Y Nakov, Preslav
%Y Villavicencio, Aline
%S Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)
%D 2022
%8 May
%I Association for Computational Linguistics
%C Dublin, Ireland
%F ri-tsuruoka-2022-pretraining
%X We investigate what kind of structural knowledge learned in neural network encoders is transferable to processing natural language. We design artificial languages with structural properties that mimic natural language, pretrain encoders on the data, and see how much performance the encoder exhibits on downstream tasks in natural language. Our experimental results show that pretraining with an artificial language with a nesting dependency structure provides some knowledge transferable to natural language. A follow-up probing analysis indicates that its success in the transfer is related to the amount of encoded contextual information and what is transferred is the knowledge of position-aware context dependence of language. Our results provide insights into how neural network encoders process human languages and the source of cross-lingual transferability of recent multilingual language models.
%R 10.18653/v1/2022.acl-long.504
%U https://aclanthology.org/2022.acl-long.504/
%U https://doi.org/10.18653/v1/2022.acl-long.504
%P 7302-7315
Markdown (Informal)
[Pretraining with Artificial Language: Studying Transferable Knowledge in Language Models](https://aclanthology.org/2022.acl-long.504/) (Ri & Tsuruoka, ACL 2022)
ACL
Ryokan Ri and Yoshimasa Tsuruoka. 2022. Pretraining with Artificial Language: Studying Transferable Knowledge in Language Models. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 7302–7315, Dublin, Ireland. Association for Computational Linguistics.