@inproceedings{sahay-etal-2018-multimodal,
title = "Multimodal Relational Tensor Network for Sentiment and Emotion Classification",
author = "Sahay, Saurav and
Kumar, Shachi H and
Xia, Rui and
Huang, Jonathan and
Nachman, Lama",
editor = "Zadeh, Amir and
Liang, Paul Pu and
Morency, Louis-Philippe and
Poria, Soujanya and
Cambria, Erik and
Scherer, Stefan",
booktitle = "Proceedings of Grand Challenge and Workshop on Human Multimodal Language (Challenge-{HML})",
month = jul,
year = "2018",
address = "Melbourne, Australia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W18-3303",
doi = "10.18653/v1/W18-3303",
pages = "20--27",
abstract = "Understanding affect from video segments has brought researchers from the language, audio and video domains together. Most of the current multimodal research in this area deals with various techniques to fuse the modalities and mostly treats the segments of a video independently. Motivated by the work of Zadeh et al. (2017) and Poria et al. (2017), we present our architecture, Relational Tensor Network, where we use the inter-modal interactions within a segment (intra-segment) and also consider the sequence of segments in a video to model the inter-segment inter-modal interactions. We also generate rich representations of the text and audio modalities by leveraging richer audio and linguistic context along with fusing fine-grained knowledge-based polarity scores from text. We present the results of our model on the CMU-MOSEI dataset and show that our model outperforms many baselines and state-of-the-art methods for sentiment classification and emotion recognition.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="sahay-etal-2018-multimodal">
<titleInfo>
<title>Multimodal Relational Tensor Network for Sentiment and Emotion Classification</title>
</titleInfo>
<name type="personal">
<namePart type="given">Saurav</namePart>
<namePart type="family">Sahay</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shachi</namePart>
<namePart type="given">H</namePart>
<namePart type="family">Kumar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rui</namePart>
<namePart type="family">Xia</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jonathan</namePart>
<namePart type="family">Huang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lama</namePart>
<namePart type="family">Nachman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of Grand Challenge and Workshop on Human Multimodal Language (Challenge-HML)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Amir</namePart>
<namePart type="family">Zadeh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Paul</namePart>
<namePart type="given">Pu</namePart>
<namePart type="family">Liang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Louis-Philippe</namePart>
<namePart type="family">Morency</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Soujanya</namePart>
<namePart type="family">Poria</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Erik</namePart>
<namePart type="family">Cambria</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Stefan</namePart>
<namePart type="family">Scherer</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Melbourne, Australia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Understanding affect from video segments has brought researchers from the language, audio and video domains together. Most of the current multimodal research in this area deals with various techniques to fuse the modalities and mostly treats the segments of a video independently. Motivated by the work of Zadeh et al. (2017) and Poria et al. (2017), we present our architecture, Relational Tensor Network, where we use the inter-modal interactions within a segment (intra-segment) and also consider the sequence of segments in a video to model the inter-segment inter-modal interactions. We also generate rich representations of the text and audio modalities by leveraging richer audio and linguistic context along with fusing fine-grained knowledge-based polarity scores from text. We present the results of our model on the CMU-MOSEI dataset and show that our model outperforms many baselines and state-of-the-art methods for sentiment classification and emotion recognition.</abstract>
<identifier type="citekey">sahay-etal-2018-multimodal</identifier>
<identifier type="doi">10.18653/v1/W18-3303</identifier>
<location>
<url>https://aclanthology.org/W18-3303</url>
</location>
<part>
<date>2018-07</date>
<extent unit="page">
<start>20</start>
<end>27</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Multimodal Relational Tensor Network for Sentiment and Emotion Classification
%A Sahay, Saurav
%A Kumar, Shachi H.
%A Xia, Rui
%A Huang, Jonathan
%A Nachman, Lama
%Y Zadeh, Amir
%Y Liang, Paul Pu
%Y Morency, Louis-Philippe
%Y Poria, Soujanya
%Y Cambria, Erik
%Y Scherer, Stefan
%S Proceedings of Grand Challenge and Workshop on Human Multimodal Language (Challenge-HML)
%D 2018
%8 July
%I Association for Computational Linguistics
%C Melbourne, Australia
%F sahay-etal-2018-multimodal
%X Understanding affect from video segments has brought researchers from the language, audio and video domains together. Most of the current multimodal research in this area deals with various techniques to fuse the modalities and mostly treats the segments of a video independently. Motivated by the work of Zadeh et al. (2017) and Poria et al. (2017), we present our architecture, Relational Tensor Network, where we use the inter-modal interactions within a segment (intra-segment) and also consider the sequence of segments in a video to model the inter-segment inter-modal interactions. We also generate rich representations of the text and audio modalities by leveraging richer audio and linguistic context along with fusing fine-grained knowledge-based polarity scores from text. We present the results of our model on the CMU-MOSEI dataset and show that our model outperforms many baselines and state-of-the-art methods for sentiment classification and emotion recognition.
%R 10.18653/v1/W18-3303
%U https://aclanthology.org/W18-3303
%U https://doi.org/10.18653/v1/W18-3303
%P 20-27
Markdown (Informal)
[Multimodal Relational Tensor Network for Sentiment and Emotion Classification](https://aclanthology.org/W18-3303) (Sahay et al., ACL 2018)
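The abstract describes intra-segment tensor fusion of text, audio and video features followed by inter-segment sequence modeling. The Python sketch below is a hypothetical illustration of that idea, not the authors' released code: the feature dimensions, layer sizes, class count, and the choice of an LSTM for inter-segment context are placeholder assumptions.

```python
# Hypothetical sketch of a relational tensor fusion model (assumed shapes/sizes).
import torch
import torch.nn as nn

class RelationalTensorNetworkSketch(nn.Module):
    """Toy illustration: intra-segment tensor fusion + inter-segment LSTM."""

    def __init__(self, d_text=32, d_audio=16, d_video=16, d_hidden=64, n_classes=7):
        super().__init__()
        # Outer product of (1 + text) x (1 + audio) x (1 + video); the appended 1
        # keeps unimodal and bimodal terms alongside the trimodal ones.
        fused_dim = (d_text + 1) * (d_audio + 1) * (d_video + 1)
        self.project = nn.Linear(fused_dim, d_hidden)
        # Sequence model over segments captures inter-segment, inter-modal context.
        self.segment_rnn = nn.LSTM(d_hidden, d_hidden, batch_first=True)
        self.classifier = nn.Linear(d_hidden, n_classes)

    def forward(self, text, audio, video):
        # Each input: (batch, n_segments, d_modality) segment-level embeddings.
        pad_one = lambda x: torch.cat([torch.ones_like(x[..., :1]), x], dim=-1)
        t, a, v = pad_one(text), pad_one(audio), pad_one(video)
        # Intra-segment fusion: modality outer product for every segment.
        fused = torch.einsum('bsi,bsj,bsk->bsijk', t, a, v).flatten(start_dim=2)
        h = torch.relu(self.project(fused))
        out, _ = self.segment_rnn(h)        # inter-segment interactions
        return self.classifier(out)         # per-segment sentiment/emotion logits

# Toy usage with random segment features (batch=2, segments=5):
model = RelationalTensorNetworkSketch()
logits = model(torch.randn(2, 5, 32), torch.randn(2, 5, 16), torch.randn(2, 5, 16))
print(logits.shape)  # torch.Size([2, 5, 7])
```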