@inproceedings{delbrouck-etal-2020-transformer,
title = "A Transformer-based joint-encoding for Emotion Recognition and Sentiment Analysis",
author = "Delbrouck, Jean-Benoit and
Tits, No{\'e} and
Brousmiche, Mathilde and
Dupont, St{\'e}phane",
editor = "Zadeh, Amir and
Morency, Louis-Philippe and
Liang, Paul Pu and
Poria, Soujanya",
booktitle = "Second Grand-Challenge and Workshop on Multimodal Language (Challenge-HML)",
month = jul,
year = "2020",
address = "Seattle, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.challengehml-1.1",
doi = "10.18653/v1/2020.challengehml-1.1",
pages = "1--7",
    abstract = "Expressed sentiment and emotions are two crucial factors in human multimodal language. This paper describes a Transformer-based joint-encoding (TBJE) for the tasks of Emotion Recognition and Sentiment Analysis. In addition to using the Transformer architecture, our approach relies on a modular co-attention and a glimpse layer to jointly encode one or more modalities. The proposed solution has also been submitted to the ACL 2020 Second Grand-Challenge on Multimodal Language to be evaluated on the CMU-MOSEI dataset. The code to replicate the presented experiments is open-source.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="delbrouck-etal-2020-transformer">
<titleInfo>
<title>A Transformer-based joint-encoding for Emotion Recognition and Sentiment Analysis</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jean-Benoit</namePart>
<namePart type="family">Delbrouck</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Noé</namePart>
<namePart type="family">Tits</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mathilde</namePart>
<namePart type="family">Brousmiche</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Stéphane</namePart>
<namePart type="family">Dupont</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Second Grand-Challenge and Workshop on Multimodal Language (Challenge-HML)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Amir</namePart>
<namePart type="family">Zadeh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Louis-Philippe</namePart>
<namePart type="family">Morency</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Paul</namePart>
<namePart type="given">Pu</namePart>
<namePart type="family">Liang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Soujanya</namePart>
<namePart type="family">Poria</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Seattle, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Expressed sentiment and emotions are two crucial factors in human multimodal language. This paper describes a Transformer-based joint-encoding (TBJE) for the tasks of Emotion Recognition and Sentiment Analysis. In addition to using the Transformer architecture, our approach relies on a modular co-attention and a glimpse layer to jointly encode one or more modalities. The proposed solution has also been submitted to the ACL 2020 Second Grand-Challenge on Multimodal Language to be evaluated on the CMU-MOSEI dataset. The code to replicate the presented experiments is open-source.</abstract>
<identifier type="citekey">delbrouck-etal-2020-transformer</identifier>
<identifier type="doi">10.18653/v1/2020.challengehml-1.1</identifier>
<location>
<url>https://aclanthology.org/2020.challengehml-1.1</url>
</location>
<part>
<date>2020-07</date>
<extent unit="page">
<start>1</start>
<end>7</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T A Transformer-based joint-encoding for Emotion Recognition and Sentiment Analysis
%A Delbrouck, Jean-Benoit
%A Tits, Noé
%A Brousmiche, Mathilde
%A Dupont, Stéphane
%Y Zadeh, Amir
%Y Morency, Louis-Philippe
%Y Liang, Paul Pu
%Y Poria, Soujanya
%S Second Grand-Challenge and Workshop on Multimodal Language (Challenge-HML)
%D 2020
%8 July
%I Association for Computational Linguistics
%C Seattle, USA
%F delbrouck-etal-2020-transformer
%X Expressed sentiment and emotions are two crucial factors in human multimodal language. This paper describes a Transformer-based joint-encoding (TBJE) for the tasks of Emotion Recognition and Sentiment Analysis. In addition to using the Transformer architecture, our approach relies on a modular co-attention and a glimpse layer to jointly encode one or more modalities. The proposed solution has also been submitted to the ACL 2020 Second Grand-Challenge on Multimodal Language to be evaluated on the CMU-MOSEI dataset. The code to replicate the presented experiments is open-source.
%R 10.18653/v1/2020.challengehml-1.1
%U https://aclanthology.org/2020.challengehml-1.1
%U https://doi.org/10.18653/v1/2020.challengehml-1.1
%P 1-7
Markdown (Informal)
[A Transformer-based joint-encoding for Emotion Recognition and Sentiment Analysis](https://aclanthology.org/2020.challengehml-1.1) (Delbrouck et al., Challenge-HML 2020)
ACL
Jean-Benoit Delbrouck, Noé Tits, Mathilde Brousmiche, and Stéphane Dupont. 2020. A Transformer-based joint-encoding for Emotion Recognition and Sentiment Analysis. In Second Grand-Challenge and Workshop on Multimodal Language (Challenge-HML), pages 1–7, Seattle, USA. Association for Computational Linguistics.