@inproceedings{chen-2021-attentive,
title = "Attentive Tree-structured Network for Monotonicity Reasoning",
author = "Chen, Zeming",
editor = "Kalouli, Aikaterini-Lida and
Moss, Lawrence S.",
booktitle = "Proceedings of the 1st and 2nd Workshops on Natural Logic Meets Machine Learning (NALOMA)",
month = jun,
year = "2021",
address = "Groningen, the Netherlands (online)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.naloma-1.3",
pages = "12--21",
abstract = "Many state-of-art neural models designed for monotonicity reasoning perform poorly on downward inference. To address this shortcoming, we developed an attentive tree-structured neural network. It consists of a tree-based long-short-term-memory network (Tree-LSTM) with soft attention. It is designed to model the syntactic parse tree information from the sentence pair of a reasoning task. A self-attentive aggregator is used for aligning the representations of the premise and the hypothesis. We present our model and evaluate it using the Monotonicity Entailment Dataset (MED). We show and attempt to explain that our model outperforms existing models on MED.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="chen-2021-attentive">
<titleInfo>
<title>Attentive Tree-structured Network for Monotonicity Reasoning</title>
</titleInfo>
<name type="personal">
<namePart type="given">Zeming</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 1st and 2nd Workshops on Natural Logic Meets Machine Learning (NALOMA)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Aikaterini-Lida</namePart>
<namePart type="family">Kalouli</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lawrence</namePart>
<namePart type="given">S</namePart>
<namePart type="family">Moss</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Groningen, the Netherlands (online)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Many state-of-art neural models designed for monotonicity reasoning perform poorly on downward inference. To address this shortcoming, we developed an attentive tree-structured neural network. It consists of a tree-based long-short-term-memory network (Tree-LSTM) with soft attention. It is designed to model the syntactic parse tree information from the sentence pair of a reasoning task. A self-attentive aggregator is used for aligning the representations of the premise and the hypothesis. We present our model and evaluate it using the Monotonicity Entailment Dataset (MED). We show and attempt to explain that our model outperforms existing models on MED.</abstract>
<identifier type="citekey">chen-2021-attentive</identifier>
<location>
<url>https://aclanthology.org/2021.naloma-1.3</url>
</location>
<part>
<date>2021-06</date>
<extent unit="page">
<start>12</start>
<end>21</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Attentive Tree-structured Network for Monotonicity Reasoning
%A Chen, Zeming
%Y Kalouli, Aikaterini-Lida
%Y Moss, Lawrence S.
%S Proceedings of the 1st and 2nd Workshops on Natural Logic Meets Machine Learning (NALOMA)
%D 2021
%8 June
%I Association for Computational Linguistics
%C Groningen, the Netherlands (online)
%F chen-2021-attentive
%X Many state-of-the-art neural models designed for monotonicity reasoning perform poorly on downward inference. To address this shortcoming, we developed an attentive tree-structured neural network. It consists of a tree-based long short-term memory network (Tree-LSTM) with soft attention. It is designed to model the syntactic parse tree information from the sentence pair of a reasoning task. A self-attentive aggregator is used for aligning the representations of the premise and the hypothesis. We present our model and evaluate it using the Monotonicity Entailment Dataset (MED). We show and attempt to explain that our model outperforms existing models on MED.
%U https://aclanthology.org/2021.naloma-1.3
%P 12-21
Markdown (Informal)
[Attentive Tree-structured Network for Monotonicity Reasoning](https://aclanthology.org/2021.naloma-1.3) (Chen, NALOMA 2021)
ACL
Zeming Chen. 2021. Attentive Tree-structured Network for Monotonicity Reasoning. In Proceedings of the 1st and 2nd Workshops on Natural Logic Meets Machine Learning (NALOMA), pages 12–21, Groningen, the Netherlands (online). Association for Computational Linguistics.
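
For readers who want a concrete picture of the components the abstract names, the sketch below is a minimal, hypothetical illustration rather than the paper's released code or exact architecture: a Child-Sum Tree-LSTM cell in the style of Tai et al. (2015) plus a simple soft-attention pooling step over node states. All module names, dimensions, and the query-based pooling scheme are assumptions made for illustration only.

```python
# Illustrative sketch only (NOT the paper's code): a Child-Sum Tree-LSTM
# node update plus a simple soft-attention pooling step, in the spirit of
# the architecture the abstract describes.
import torch
import torch.nn as nn
import torch.nn.functional as F


class ChildSumTreeLSTMCell(nn.Module):
    """One node update: combines a word embedding with its children's states."""

    def __init__(self, input_dim: int, hidden_dim: int):
        super().__init__()
        self.W_iou = nn.Linear(input_dim, 3 * hidden_dim)   # input, output, update gates
        self.U_iou = nn.Linear(hidden_dim, 3 * hidden_dim, bias=False)
        self.W_f = nn.Linear(input_dim, hidden_dim)          # forget gate, applied per child
        self.U_f = nn.Linear(hidden_dim, hidden_dim, bias=False)

    def forward(self, x, child_h, child_c):
        # x: (input_dim,); child_h, child_c: (num_children, hidden_dim)
        h_sum = child_h.sum(dim=0)                           # child-sum aggregation
        i, o, u = torch.chunk(self.W_iou(x) + self.U_iou(h_sum), 3, dim=-1)
        i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
        f = torch.sigmoid(self.W_f(x).unsqueeze(0) + self.U_f(child_h))  # one forget gate per child
        c = i * u + (f * child_c).sum(dim=0)
        h = o * torch.tanh(c)
        return h, c


def soft_attention_pool(node_states, query):
    # node_states: (num_nodes, hidden_dim); query: (hidden_dim,)
    # Score every tree node against a query vector and return the weighted sum.
    scores = node_states @ query
    weights = F.softmax(scores, dim=0)
    return weights @ node_states


if __name__ == "__main__":
    cell = ChildSumTreeLSTMCell(input_dim=8, hidden_dim=16)
    x = torch.randn(8)
    child_h, child_c = torch.zeros(2, 16), torch.zeros(2, 16)  # two leaf children
    h, c = cell(x, child_h, child_c)
    pooled = soft_attention_pool(h.unsqueeze(0), torch.randn(16))
    print(h.shape, pooled.shape)
```

In a full premise/hypothesis model one would run such a cell bottom-up over each sentence's parse tree and align the two resulting representations; the self-attentive aggregator mentioned in the abstract is not reproduced here.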