@inproceedings{rozanova-etal-2021-supporting,
title = "Supporting Context Monotonicity Abstractions in Neural {NLI} Models",
author = "Rozanova, Julia and
Ferreira, Deborah and
Thayaparan, Mokanarangan and
Valentino, Marco and
Freitas, Andr{\'e}",
editor = "Kalouli, Aikaterini-Lida and
Moss, Lawrence S.",
booktitle = "Proceedings of the 1st and 2nd Workshops on Natural Logic Meets Machine Learning (NALOMA)",
month = jun,
year = "2021",
address = "Groningen, the Netherlands (online)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.naloma-1.6",
pages = "41--50",
abstract = "Natural language contexts display logical regularities with respect to substitutions of related concepts: these are captured in a functional order-theoretic property called monotonicity. For a certain class of NLI problems where the resulting entailment label depends only on the context monotonicity and the relation between the substituted concepts, we build on previous techniques that aim to improve the performance of NLI models for these problems, as consistent performance across both upward and downward monotone contexts still seems difficult to attain even for state of the art models. To this end, we reframe the problem of context monotonicity classification to make it compatible with transformer-based pre-trained NLI models and add this task to the training pipeline. Furthermore, we introduce a sound and complete simplified monotonicity logic formalism which describes our treatment of contexts as abstract units. Using the notions in our formalism, we adapt targeted challenge sets to investigate whether an intermediate context monotonicity classification task can aid NLI models{'} performance on examples exhibiting monotonicity reasoning.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="rozanova-etal-2021-supporting">
<titleInfo>
<title>Supporting Context Monotonicity Abstractions in Neural NLI Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Julia</namePart>
<namePart type="family">Rozanova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Deborah</namePart>
<namePart type="family">Ferreira</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mokanarangan</namePart>
<namePart type="family">Thayaparan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marco</namePart>
<namePart type="family">Valentino</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">André</namePart>
<namePart type="family">Freitas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 1st and 2nd Workshops on Natural Logic Meets Machine Learning (NALOMA)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Aikaterini-Lida</namePart>
<namePart type="family">Kalouli</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lawrence</namePart>
<namePart type="given">S</namePart>
<namePart type="family">Moss</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Groningen, the Netherlands (online)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Natural language contexts display logical regularities with respect to substitutions of related concepts: these are captured in a functional order-theoretic property called monotonicity. For a certain class of NLI problems where the resulting entailment label depends only on the context monotonicity and the relation between the substituted concepts, we build on previous techniques that aim to improve the performance of NLI models for these problems, as consistent performance across both upward and downward monotone contexts still seems difficult to attain even for state-of-the-art models. To this end, we reframe the problem of context monotonicity classification to make it compatible with transformer-based pre-trained NLI models and add this task to the training pipeline. Furthermore, we introduce a sound and complete simplified monotonicity logic formalism which describes our treatment of contexts as abstract units. Using the notions in our formalism, we adapt targeted challenge sets to investigate whether an intermediate context monotonicity classification task can aid NLI models’ performance on examples exhibiting monotonicity reasoning.</abstract>
<identifier type="citekey">rozanova-etal-2021-supporting</identifier>
<location>
<url>https://aclanthology.org/2021.naloma-1.6</url>
</location>
<part>
<date>2021-06</date>
<extent unit="page">
<start>41</start>
<end>50</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Supporting Context Monotonicity Abstractions in Neural NLI Models
%A Rozanova, Julia
%A Ferreira, Deborah
%A Thayaparan, Mokanarangan
%A Valentino, Marco
%A Freitas, André
%Y Kalouli, Aikaterini-Lida
%Y Moss, Lawrence S.
%S Proceedings of the 1st and 2nd Workshops on Natural Logic Meets Machine Learning (NALOMA)
%D 2021
%8 June
%I Association for Computational Linguistics
%C Groningen, the Netherlands (online)
%F rozanova-etal-2021-supporting
%X Natural language contexts display logical regularities with respect to substitutions of related concepts: these are captured in a functional order-theoretic property called monotonicity. For a certain class of NLI problems where the resulting entailment label depends only on the context monotonicity and the relation between the substituted concepts, we build on previous techniques that aim to improve the performance of NLI models for these problems, as consistent performance across both upward and downward monotone contexts still seems difficult to attain even for state-of-the-art models. To this end, we reframe the problem of context monotonicity classification to make it compatible with transformer-based pre-trained NLI models and add this task to the training pipeline. Furthermore, we introduce a sound and complete simplified monotonicity logic formalism which describes our treatment of contexts as abstract units. Using the notions in our formalism, we adapt targeted challenge sets to investigate whether an intermediate context monotonicity classification task can aid NLI models’ performance on examples exhibiting monotonicity reasoning.
%U https://aclanthology.org/2021.naloma-1.6
%P 41-50
Markdown (Informal)

[Supporting Context Monotonicity Abstractions in Neural NLI Models](https://aclanthology.org/2021.naloma-1.6) (Rozanova et al., NALOMA 2021)

ACL

Julia Rozanova, Deborah Ferreira, Mokanarangan Thayaparan, Marco Valentino, and André Freitas. 2021. Supporting Context Monotonicity Abstractions in Neural NLI Models. In Proceedings of the 1st and 2nd Workshops on Natural Logic Meets Machine Learning (NALOMA), pages 41–50, Groningen, the Netherlands (online). Association for Computational Linguistics.
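
For a concrete feel of the monotonicity-NLI setup the abstract describes, the following is a minimal sketch, not the authors' code: it probes an off-the-shelf pre-trained NLI model on one upward-monotone and one downward-monotone substitution pair. The checkpoint `roberta-large-mnli` (Hugging Face `transformers`) and the example sentences are illustrative assumptions, not taken from the paper.

```python
# Minimal sketch: probing a pre-trained NLI model on monotonicity examples.
# Assumptions (not from the paper): the checkpoint "roberta-large-mnli" and
# the example sentences below are illustrative choices only.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("roberta-large-mnli")
model = AutoModelForSequenceClassification.from_pretrained("roberta-large-mnli")
model.eval()

def nli_label(premise: str, hypothesis: str) -> str:
    """Return the model's predicted NLI label for a premise/hypothesis pair."""
    inputs = tokenizer(premise, hypothesis, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[int(logits.argmax(dim=-1))]

# Upward-monotone context f(x) = "Some x are barking": since dogs ⊑ animals,
# f(dogs) should entail f(animals).
print(nli_label("Some dogs are barking.", "Some animals are barking."))

# Downward-monotone context g(x) = "No x are barking": the entailment
# direction flips, so g(animals) should entail g(dogs).
print(nli_label("No animals are barking.", "No dogs are barking."))
```

If the model handled both monotonicity directions consistently, both calls would print ENTAILMENT; the abstract's observation is that such consistency across upward and downward monotone contexts still seems difficult to attain, which motivates the paper's intermediate context monotonicity classification task.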