@inproceedings{arimoto-okanoya-2016-comparison,
title = "Comparison of Emotional Understanding in Modality-Controlled Environments using Multimodal Online Emotional Communication Corpus",
author = "Arimoto, Yoshiko and
Okanoya, Kazuo",
editor = "Calzolari, Nicoletta and
Choukri, Khalid and
Declerck, Thierry and
Goggi, Sara and
Grobelnik, Marko and
Maegaard, Bente and
Mariani, Joseph and
Mazo, Helene and
Moreno, Asuncion and
Odijk, Jan and
Piperidis, Stelios",
booktitle = "Proceedings of the Tenth International Conference on Language Resources and Evaluation ({LREC}'16)",
month = may,
year = "2016",
address = "Portoro{\v{z}}, Slovenia",
publisher = "European Language Resources Association (ELRA)",
url = "https://aclanthology.org/L16-1343",
pages = "2162--2167",
abstract = "In online computer-mediated communication, speakers were considered to have experienced difficulties in catching their partner{'}s emotions and in conveying their own emotions. To explain why online emotional communication is so difficult and to investigate how this problem should be solved, multimodal online emotional communication corpus was constructed by recording approximately 100 speakers{'} emotional expressions and reactions in a modality-controlled environment. Speakers communicated over the Internet using video chat, voice chat or text chat; their face-to-face conversations were used for comparison purposes. The corpora incorporated emotional labels by evaluating the speaker{'}s dynamic emotional states and the measurements of the speaker{'}s facial expression, vocal expression and autonomic nervous system activity. For the initial study of this project, which used a large-scale emotional communication corpus, the accuracy of online emotional understanding was assessed to demonstrate the emotional labels evaluated by the speakers and to summarize the speaker{'}s answers on the questionnaire regarding the difference between an online chat and face-to-face conversations in which they actually participated. The results revealed that speakers have difficulty communicating their emotions in online communication environments, regardless of the type of communication modality and that inaccurate emotional understanding occurs more frequently in online computer-mediated communication than in face-to-face communication.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="arimoto-okanoya-2016-comparison">
<titleInfo>
<title>Comparison of Emotional Understanding in Modality-Controlled Environments using Multimodal Online Emotional Communication Corpus</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yoshiko</namePart>
<namePart type="family">Arimoto</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kazuo</namePart>
<namePart type="family">Okanoya</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2016-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC’16)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Nicoletta</namePart>
<namePart type="family">Calzolari</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Khalid</namePart>
<namePart type="family">Choukri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Thierry</namePart>
<namePart type="family">Declerck</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sara</namePart>
<namePart type="family">Goggi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marko</namePart>
<namePart type="family">Grobelnik</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bente</namePart>
<namePart type="family">Maegaard</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Joseph</namePart>
<namePart type="family">Mariani</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Helene</namePart>
<namePart type="family">Mazo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Asuncion</namePart>
<namePart type="family">Moreno</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jan</namePart>
<namePart type="family">Odijk</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Stelios</namePart>
<namePart type="family">Piperidis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>European Language Resources Association (ELRA)</publisher>
<place>
<placeTerm type="text">Portorož, Slovenia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In online computer-mediated communication, speakers are considered to have difficulty perceiving their partner’s emotions and conveying their own emotions. To explain why online emotional communication is so difficult and to investigate how this problem might be solved, a multimodal online emotional communication corpus was constructed by recording approximately 100 speakers’ emotional expressions and reactions in a modality-controlled environment. Speakers communicated over the Internet using video chat, voice chat or text chat; their face-to-face conversations were used for comparison. The corpus incorporates emotional labels obtained by having the speakers rate their dynamic emotional states, together with measurements of the speakers’ facial expressions, vocal expressions and autonomic nervous system activity. For the initial study of this project, which used this large-scale emotional communication corpus, the accuracy of online emotional understanding was assessed by examining the emotional labels assigned by the speakers and by summarizing the speakers’ questionnaire responses about the differences between the online chats and the face-to-face conversations in which they actually participated. The results revealed that speakers have difficulty communicating their emotions in online communication environments regardless of the communication modality, and that inaccurate emotional understanding occurs more frequently in online computer-mediated communication than in face-to-face communication.</abstract>
<identifier type="citekey">arimoto-okanoya-2016-comparison</identifier>
<location>
<url>https://aclanthology.org/L16-1343</url>
</location>
<part>
<date>2016-05</date>
<extent unit="page">
<start>2162</start>
<end>2167</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Comparison of Emotional Understanding in Modality-Controlled Environments using Multimodal Online Emotional Communication Corpus
%A Arimoto, Yoshiko
%A Okanoya, Kazuo
%Y Calzolari, Nicoletta
%Y Choukri, Khalid
%Y Declerck, Thierry
%Y Goggi, Sara
%Y Grobelnik, Marko
%Y Maegaard, Bente
%Y Mariani, Joseph
%Y Mazo, Helene
%Y Moreno, Asuncion
%Y Odijk, Jan
%Y Piperidis, Stelios
%S Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC’16)
%D 2016
%8 May
%I European Language Resources Association (ELRA)
%C Portorož, Slovenia
%F arimoto-okanoya-2016-comparison
%X In online computer-mediated communication, speakers are considered to have difficulty perceiving their partner’s emotions and conveying their own emotions. To explain why online emotional communication is so difficult and to investigate how this problem might be solved, a multimodal online emotional communication corpus was constructed by recording approximately 100 speakers’ emotional expressions and reactions in a modality-controlled environment. Speakers communicated over the Internet using video chat, voice chat or text chat; their face-to-face conversations were used for comparison. The corpus incorporates emotional labels obtained by having the speakers rate their dynamic emotional states, together with measurements of the speakers’ facial expressions, vocal expressions and autonomic nervous system activity. For the initial study of this project, which used this large-scale emotional communication corpus, the accuracy of online emotional understanding was assessed by examining the emotional labels assigned by the speakers and by summarizing the speakers’ questionnaire responses about the differences between the online chats and the face-to-face conversations in which they actually participated. The results revealed that speakers have difficulty communicating their emotions in online communication environments regardless of the communication modality, and that inaccurate emotional understanding occurs more frequently in online computer-mediated communication than in face-to-face communication.
%U https://aclanthology.org/L16-1343
%P 2162-2167
Markdown (Informal)
[Comparison of Emotional Understanding in Modality-Controlled Environments using Multimodal Online Emotional Communication Corpus](https://aclanthology.org/L16-1343) (Arimoto & Okanoya, LREC 2016)