@inproceedings{alhuzali-etal-2018-ubc,
title = "{UBC}-{NLP} at {IEST} 2018: Learning Implicit Emotion With an Ensemble of Language Models",
author = "Alhuzali, Hassan and
Elaraby, Mohamed and
Abdul-Mageed, Muhammad",
editor = "Balahur, Alexandra and
Mohammad, Saif M. and
Hoste, Veronique and
Klinger, Roman",
booktitle = "Proceedings of the 9th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis",
month = oct,
year = "2018",
address = "Brussels, Belgium",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W18-6250",
doi = "10.18653/v1/W18-6250",
pages = "342--347",
abstract = "We describe UBC-NLP contribution to IEST-2018, focused at learning implicit emotion in Twitter data. Among the 30 participating teams, our system ranked the 4th (with 69.3{\%} \textit{F}-score). Post competition, we were able to score slightly higher than the 3rd ranking system (reaching 70.7{\%}). Our system is trained on top of a pre-trained language model (LM), fine-tuned on the data provided by the task organizers. Our best results are acquired by an average of an ensemble of language models. We also offer an analysis of system performance and the impact of training data size on the task. For example, we show that training our best model for only one epoch with {\textless} 40{\%} of the data enables better performance than the baseline reported by Klinger et al. (2018) for the task.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="alhuzali-etal-2018-ubc">
<titleInfo>
<title>UBC-NLP at IEST 2018: Learning Implicit Emotion With an Ensemble of Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Hassan</namePart>
<namePart type="family">Alhuzali</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohamed</namePart>
<namePart type="family">Elaraby</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Muhammad</namePart>
<namePart type="family">Abdul-Mageed</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-10</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 9th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis</title>
</titleInfo>
<name type="personal">
<namePart type="given">Alexandra</namePart>
<namePart type="family">Balahur</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Saif</namePart>
<namePart type="given">M</namePart>
<namePart type="family">Mohammad</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Veronique</namePart>
<namePart type="family">Hoste</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Roman</namePart>
<namePart type="family">Klinger</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Brussels, Belgium</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We describe the UBC-NLP contribution to IEST-2018, focused on learning implicit emotion in Twitter data. Among the 30 participating teams, our system ranked 4th (with a 69.3% F-score). Post-competition, we were able to score slightly higher than the 3rd-ranked system (reaching 70.7%). Our system is trained on top of a pre-trained language model (LM), fine-tuned on the data provided by the task organizers. Our best results are obtained by averaging an ensemble of language models. We also offer an analysis of system performance and of the impact of training-data size on the task. For example, we show that training our best model for only one epoch with less than 40% of the data yields better performance than the baseline reported by Klinger et al. (2018) for the task.</abstract>
<identifier type="citekey">alhuzali-etal-2018-ubc</identifier>
<identifier type="doi">10.18653/v1/W18-6250</identifier>
<location>
<url>https://aclanthology.org/W18-6250</url>
</location>
<part>
<date>2018-10</date>
<extent unit="page">
<start>342</start>
<end>347</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T UBC-NLP at IEST 2018: Learning Implicit Emotion With an Ensemble of Language Models
%A Alhuzali, Hassan
%A Elaraby, Mohamed
%A Abdul-Mageed, Muhammad
%Y Balahur, Alexandra
%Y Mohammad, Saif M.
%Y Hoste, Veronique
%Y Klinger, Roman
%S Proceedings of the 9th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis
%D 2018
%8 October
%I Association for Computational Linguistics
%C Brussels, Belgium
%F alhuzali-etal-2018-ubc
%X We describe the UBC-NLP contribution to IEST-2018, focused on learning implicit emotion in Twitter data. Among the 30 participating teams, our system ranked 4th (with a 69.3% F-score). Post-competition, we were able to score slightly higher than the 3rd-ranked system (reaching 70.7%). Our system is trained on top of a pre-trained language model (LM), fine-tuned on the data provided by the task organizers. Our best results are obtained by averaging an ensemble of language models. We also offer an analysis of system performance and of the impact of training-data size on the task. For example, we show that training our best model for only one epoch with less than 40% of the data yields better performance than the baseline reported by Klinger et al. (2018) for the task.
%R 10.18653/v1/W18-6250
%U https://aclanthology.org/W18-6250
%U https://doi.org/10.18653/v1/W18-6250
%P 342-347
Markdown (Informal)
[UBC-NLP at IEST 2018: Learning Implicit Emotion With an Ensemble of Language Models](https://aclanthology.org/W18-6250) (Alhuzali et al., WASSA 2018)
ACL
Hassan Alhuzali, Mohamed Elaraby, and Muhammad Abdul-Mageed. 2018. UBC-NLP at IEST 2018: Learning Implicit Emotion With an Ensemble of Language Models. In Proceedings of the 9th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis, pages 342–347, Brussels, Belgium. Association for Computational Linguistics.