BibTeX
@inproceedings{traylor-etal-2021-transferring,
title = "Transferring Representations of Logical Connectives",
author = "Traylor, Aaron and
Pavlick, Ellie and
Feiman, Roman",
editor = "Kalouli, Aikaterini-Lida and
Moss, Lawrence S.",
booktitle = "Proceedings of the 1st and 2nd Workshops on Natural Logic Meets Machine Learning (NALOMA)",
month = jun,
year = "2021",
address = "Groningen, the Netherlands (online)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.naloma-1.4",
pages = "22--25",
abstract = "In modern natural language processing pipelines, it is common practice to {``}pretrain{''} a generative language model on a large corpus of text, and then to {``}finetune{''} the created representations by continuing to train them on a discriminative textual inference task. However, it is not immediately clear whether the logical meaning necessary to model logical entailment is captured by language models in this paradigm. We examine this pretrain-finetune recipe with language models trained on a synthetic propositional language entailment task, and present results on test sets probing models{'} knowledge of axioms of first order logic.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="traylor-etal-2021-transferring">
<titleInfo>
<title>Transferring Representations of Logical Connectives</title>
</titleInfo>
<name type="personal">
<namePart type="given">Aaron</namePart>
<namePart type="family">Traylor</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ellie</namePart>
<namePart type="family">Pavlick</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Roman</namePart>
<namePart type="family">Feiman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 1st and 2nd Workshops on Natural Logic Meets Machine Learning (NALOMA)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Aikaterini-Lida</namePart>
<namePart type="family">Kalouli</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lawrence</namePart>
<namePart type="given">S</namePart>
<namePart type="family">Moss</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Groningen, the Netherlands (online)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In modern natural language processing pipelines, it is common practice to “pretrain” a generative language model on a large corpus of text, and then to “finetune” the created representations by continuing to train them on a discriminative textual inference task. However, it is not immediately clear whether the logical meaning necessary to model logical entailment is captured by language models in this paradigm. We examine this pretrain-finetune recipe with language models trained on a synthetic propositional language entailment task, and present results on test sets probing models’ knowledge of axioms of first order logic.</abstract>
<identifier type="citekey">traylor-etal-2021-transferring</identifier>
<location>
<url>https://aclanthology.org/2021.naloma-1.4</url>
</location>
<part>
<date>2021-06</date>
<extent unit="page">
<start>22</start>
<end>25</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Transferring Representations of Logical Connectives
%A Traylor, Aaron
%A Pavlick, Ellie
%A Feiman, Roman
%Y Kalouli, Aikaterini-Lida
%Y Moss, Lawrence S.
%S Proceedings of the 1st and 2nd Workshops on Natural Logic Meets Machine Learning (NALOMA)
%D 2021
%8 June
%I Association for Computational Linguistics
%C Groningen, the Netherlands (online)
%F traylor-etal-2021-transferring
%X In modern natural language processing pipelines, it is common practice to “pretrain” a generative language model on a large corpus of text, and then to “finetune” the created representations by continuing to train them on a discriminative textual inference task. However, it is not immediately clear whether the logical meaning necessary to model logical entailment is captured by language models in this paradigm. We examine this pretrain-finetune recipe with language models trained on a synthetic propositional language entailment task, and present results on test sets probing models’ knowledge of axioms of first order logic.
%U https://aclanthology.org/2021.naloma-1.4
%P 22-25
Markdown (Informal)
[Transferring Representations of Logical Connectives](https://aclanthology.org/2021.naloma-1.4) (Traylor et al., NALOMA 2021)
ACL
Aaron Traylor, Ellie Pavlick, and Roman Feiman. 2021. Transferring Representations of Logical Connectives. In Proceedings of the 1st and 2nd Workshops on Natural Logic Meets Machine Learning (NALOMA), pages 22–25, Groningen, the Netherlands (online). Association for Computational Linguistics.