@inproceedings{kogkalidis-etal-2020-neural,
title = "Neural Proof Nets",
author = "Kogkalidis, Konstantinos and
Moortgat, Michael and
Moot, Richard",
editor = "Fern{\'a}ndez, Raquel and
Linzen, Tal",
booktitle = "Proceedings of the 24th Conference on Computational Natural Language Learning",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.conll-1.3/",
doi = "10.18653/v1/2020.conll-1.3",
pages = "26--40",
abstract = "Linear logic and the linear {\ensuremath{\lambda}}-calculus have a long standing tradition in the study of natural language form and meaning. Among the proof calculi of linear logic, proof nets are of particular interest, offering an attractive geometric representation of derivations that is unburdened by the bureaucratic complications of conventional prooftheoretic formats. Building on recent advances in set-theoretic learning, we propose a neural variant of proof nets based on Sinkhorn networks, which allows us to translate parsing as the problem of extracting syntactic primitives and permuting them into alignment. Our methodology induces a batch-efficient, end-to-end differentiable architecture that actualizes a formally grounded yet highly efficient neuro-symbolic parser. We test our approach on {\AE}Thel, a dataset of type-logical derivations for written Dutch, where it manages to correctly transcribe raw text sentences into proofs and terms of the linear {\ensuremath{\lambda}}-calculus with an accuracy of as high as 70{\%}."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kogkalidis-etal-2020-neural">
<titleInfo>
<title>Neural Proof Nets</title>
</titleInfo>
<name type="personal">
<namePart type="given">Konstantinos</namePart>
<namePart type="family">Kogkalidis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Michael</namePart>
<namePart type="family">Moortgat</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Richard</namePart>
<namePart type="family">Moot</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 24th Conference on Computational Natural Language Learning</title>
</titleInfo>
<name type="personal">
<namePart type="given">Raquel</namePart>
<namePart type="family">Fernández</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tal</namePart>
<namePart type="family">Linzen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Linear logic and the linear λ-calculus have a long standing tradition in the study of natural language form and meaning. Among the proof calculi of linear logic, proof nets are of particular interest, offering an attractive geometric representation of derivations that is unburdened by the bureaucratic complications of conventional prooftheoretic formats. Building on recent advances in set-theoretic learning, we propose a neural variant of proof nets based on Sinkhorn networks, which allows us to translate parsing as the problem of extracting syntactic primitives and permuting them into alignment. Our methodology induces a batch-efficient, end-to-end differentiable architecture that actualizes a formally grounded yet highly efficient neuro-symbolic parser. We test our approach on ÆThel, a dataset of type-logical derivations for written Dutch, where it manages to correctly transcribe raw text sentences into proofs and terms of the linear λ-calculus with an accuracy of as high as 70%.</abstract>
<identifier type="citekey">kogkalidis-etal-2020-neural</identifier>
<identifier type="doi">10.18653/v1/2020.conll-1.3</identifier>
<location>
<url>https://aclanthology.org/2020.conll-1.3/</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>26</start>
<end>40</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Neural Proof Nets
%A Kogkalidis, Konstantinos
%A Moortgat, Michael
%A Moot, Richard
%Y Fernández, Raquel
%Y Linzen, Tal
%S Proceedings of the 24th Conference on Computational Natural Language Learning
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F kogkalidis-etal-2020-neural
%X Linear logic and the linear λ-calculus have a long standing tradition in the study of natural language form and meaning. Among the proof calculi of linear logic, proof nets are of particular interest, offering an attractive geometric representation of derivations that is unburdened by the bureaucratic complications of conventional prooftheoretic formats. Building on recent advances in set-theoretic learning, we propose a neural variant of proof nets based on Sinkhorn networks, which allows us to translate parsing as the problem of extracting syntactic primitives and permuting them into alignment. Our methodology induces a batch-efficient, end-to-end differentiable architecture that actualizes a formally grounded yet highly efficient neuro-symbolic parser. We test our approach on ÆThel, a dataset of type-logical derivations for written Dutch, where it manages to correctly transcribe raw text sentences into proofs and terms of the linear λ-calculus with an accuracy of as high as 70%.
%R 10.18653/v1/2020.conll-1.3
%U https://aclanthology.org/2020.conll-1.3/
%U https://doi.org/10.18653/v1/2020.conll-1.3
%P 26-40
Markdown (Informal)
[Neural Proof Nets](https://aclanthology.org/2020.conll-1.3/) (Kogkalidis et al., CoNLL 2020)
ACL
- Konstantinos Kogkalidis, Michael Moortgat, and Richard Moot. 2020. Neural Proof Nets. In Proceedings of the 24th Conference on Computational Natural Language Learning, pages 26–40, Online. Association for Computational Linguistics.