@inproceedings{ishihara-etal-2018-neural,
    title = "Neural Tensor Networks with Diagonal Slice Matrices",
    author = "Ishihara, Takahiro and
      Hayashi, Katsuhiko and
      Manabe, Hitoshi and
      Shimbo, Masashi and
      Nagata, Masaaki",
    editor = "Walker, Marilyn and
      Ji, Heng and
      Stent, Amanda",
    booktitle = "Proceedings of the 2018 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers)",
    month = jun,
    year = "2018",
    address = "New Orleans, Louisiana",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/N18-1047",
    doi = "10.18653/v1/N18-1047",
    pages = "506--515",
abstract = "Although neural tensor networks (NTNs) have been successful in many NLP tasks, they require a large number of parameters to be estimated, which often leads to overfitting and a long training time. We address these issues by applying eigendecomposition to each slice matrix of a tensor to reduce its number of paramters. First, we evaluate our proposed NTN models on knowledge graph completion. Second, we extend the models to recursive NTNs (RNTNs) and evaluate them on logical reasoning tasks. These experiments show that our proposed models learn better and faster than the original (R)NTNs.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="ishihara-etal-2018-neural">
    <titleInfo>
      <title>Neural Tensor Networks with Diagonal Slice Matrices</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Takahiro</namePart>
      <namePart type="family">Ishihara</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Katsuhiko</namePart>
      <namePart type="family">Hayashi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Hitoshi</namePart>
      <namePart type="family">Manabe</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Masashi</namePart>
      <namePart type="family">Shimbo</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Masaaki</namePart>
      <namePart type="family">Nagata</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2018-06</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Marilyn</namePart>
        <namePart type="family">Walker</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Heng</namePart>
        <namePart type="family">Ji</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Amanda</namePart>
        <namePart type="family">Stent</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">New Orleans, Louisiana</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Although neural tensor networks (NTNs) have been successful in many NLP tasks, they require a large number of parameters to be estimated, which often leads to overfitting and a long training time. We address these issues by applying eigendecomposition to each slice matrix of a tensor to reduce its number of parameters. First, we evaluate our proposed NTN models on knowledge graph completion. Second, we extend the models to recursive NTNs (RNTNs) and evaluate them on logical reasoning tasks. These experiments show that our proposed models learn better and faster than the original (R)NTNs.</abstract>
<identifier type="citekey">ishihara-etal-2018-neural</identifier>
<identifier type="doi">10.18653/v1/N18-1047</identifier>
<location>
<url>https://aclanthology.org/N18-1047</url>
</location>
<part>
<date>2018-06</date>
<extent unit="page">
<start>506</start>
<end>515</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Neural Tensor Networks with Diagonal Slice Matrices
%A Ishihara, Takahiro
%A Hayashi, Katsuhiko
%A Manabe, Hitoshi
%A Shimbo, Masashi
%A Nagata, Masaaki
%Y Walker, Marilyn
%Y Ji, Heng
%Y Stent, Amanda
%S Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers)
%D 2018
%8 June
%I Association for Computational Linguistics
%C New Orleans, Louisiana
%F ishihara-etal-2018-neural
%X Although neural tensor networks (NTNs) have been successful in many NLP tasks, they require a large number of parameters to be estimated, which often leads to overfitting and a long training time. We address these issues by applying eigendecomposition to each slice matrix of a tensor to reduce its number of parameters. First, we evaluate our proposed NTN models on knowledge graph completion. Second, we extend the models to recursive NTNs (RNTNs) and evaluate them on logical reasoning tasks. These experiments show that our proposed models learn better and faster than the original (R)NTNs.
%R 10.18653/v1/N18-1047
%U https://aclanthology.org/N18-1047
%U https://doi.org/10.18653/v1/N18-1047
%P 506-515
Markdown (Informal)
[Neural Tensor Networks with Diagonal Slice Matrices](https://aclanthology.org/N18-1047) (Ishihara et al., NAACL 2018)

ACL
Takahiro Ishihara, Katsuhiko Hayashi, Hitoshi Manabe, Masashi Shimbo, and Masaaki Nagata. 2018. Neural Tensor Networks with Diagonal Slice Matrices. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 506–515, New Orleans, Louisiana. Association for Computational Linguistics.
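The parameter reduction the abstract describes can be made concrete with a small numeric sketch: an NTN bilinear term with a full d×d slice matrix costs d² parameters per slice, while a diagonal slice costs only d. The NumPy snippet below is an illustrative sketch only, not the paper's implementation; the dimension d = 100 and all variable names are assumptions.

```python
import numpy as np

d = 100                      # entity embedding dimension (assumed for illustration)
rng = np.random.default_rng(0)
e1 = rng.standard_normal(d)  # head entity embedding
e2 = rng.standard_normal(d)  # tail entity embedding

# Original NTN slice: a full d x d matrix, i.e. d^2 parameters per slice.
W_full = rng.standard_normal((d, d))
score_full = e1 @ W_full @ e2

# Diagonal slice: d parameters per slice; the bilinear form e1^T diag(w) e2
# reduces to a weighted inner product.
w_diag = rng.standard_normal(d)
score_diag = e1 @ (w_diag * e2)

print(f"full slice: {W_full.size} parameters, score = {score_full:.3f}")
print(f"diagonal slice: {w_diag.size} parameters, score = {score_diag:.3f}")
```

In the paper's model the diagonal form arises from eigendecomposition of each slice matrix; the sketch shows only the resulting per-slice parameter saving, not the full (R)NTN architecture or training procedure.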