@inproceedings{gratian-haid-2018-braint,
title = "{B}rain{T} at {IEST} 2018: Fine-tuning Multiclass Perceptron For Implicit Emotion Classification",
author = "Gratian, Vachagan and
Haid, Marina",
editor = "Balahur, Alexandra and
Mohammad, Saif M. and
Hoste, Veronique and
Klinger, Roman",
booktitle = "Proceedings of the 9th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis",
month = oct,
year = "2018",
address = "Brussels, Belgium",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W18-6235",
doi = "10.18653/v1/W18-6235",
pages = "243--247",
abstract = "We present \textit{BrainT}, a multi-class, averaged perceptron tested on implicit emotion prediction of tweets. We show that the dataset is linearly separable and explore ways in fine-tuning the baseline classifier. Our results indicate that the bag-of-words features benefit the model moderately and prediction can be improved with bigrams, trigrams, \textit{skip-one}-tetragrams and POS-tags. Furthermore, we find preprocessing of the n-grams, including stemming, lowercasing, stopword filtering, emoji and emoticon conversion generally not useful. The model is trained on an annotated corpus of 153,383 tweets and predictions on the test data were submitted to the WASSA-2018 Implicit Emotion Shared Task. BrainT attained a Macro F-score of 0.63.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="gratian-haid-2018-braint">
    <titleInfo>
      <title>BrainT at IEST 2018: Fine-tuning Multiclass Perceptron For Implicit Emotion Classification</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Vachagan</namePart>
      <namePart type="family">Gratian</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Marina</namePart>
      <namePart type="family">Haid</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2018-10</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 9th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Alexandra</namePart>
        <namePart type="family">Balahur</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Saif</namePart>
        <namePart type="given">M</namePart>
        <namePart type="family">Mohammad</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Veronique</namePart>
        <namePart type="family">Hoste</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Roman</namePart>
        <namePart type="family">Klinger</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Brussels, Belgium</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We present BrainT, a multi-class, averaged perceptron tested on implicit emotion prediction of tweets. We show that the dataset is linearly separable and explore ways in fine-tuning the baseline classifier. Our results indicate that the bag-of-words features benefit the model moderately and prediction can be improved with bigrams, trigrams, skip-one-tetragrams and POS-tags. Furthermore, we find preprocessing of the n-grams, including stemming, lowercasing, stopword filtering, emoji and emoticon conversion generally not useful. The model is trained on an annotated corpus of 153,383 tweets and predictions on the test data were submitted to the WASSA-2018 Implicit Emotion Shared Task. BrainT attained a Macro F-score of 0.63.</abstract>
    <identifier type="citekey">gratian-haid-2018-braint</identifier>
    <identifier type="doi">10.18653/v1/W18-6235</identifier>
    <location>
      <url>https://aclanthology.org/W18-6235</url>
    </location>
    <part>
      <date>2018-10</date>
      <extent unit="page">
        <start>243</start>
        <end>247</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T BrainT at IEST 2018: Fine-tuning Multiclass Perceptron For Implicit Emotion Classification
%A Gratian, Vachagan
%A Haid, Marina
%Y Balahur, Alexandra
%Y Mohammad, Saif M.
%Y Hoste, Veronique
%Y Klinger, Roman
%S Proceedings of the 9th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis
%D 2018
%8 October
%I Association for Computational Linguistics
%C Brussels, Belgium
%F gratian-haid-2018-braint
%X We present BrainT, a multi-class, averaged perceptron tested on implicit emotion prediction of tweets. We show that the dataset is linearly separable and explore ways in fine-tuning the baseline classifier. Our results indicate that the bag-of-words features benefit the model moderately and prediction can be improved with bigrams, trigrams, skip-one-tetragrams and POS-tags. Furthermore, we find preprocessing of the n-grams, including stemming, lowercasing, stopword filtering, emoji and emoticon conversion generally not useful. The model is trained on an annotated corpus of 153,383 tweets and predictions on the test data were submitted to the WASSA-2018 Implicit Emotion Shared Task. BrainT attained a Macro F-score of 0.63.
%R 10.18653/v1/W18-6235
%U https://aclanthology.org/W18-6235
%U https://doi.org/10.18653/v1/W18-6235
%P 243-247
Markdown (Informal)
[BrainT at IEST 2018: Fine-tuning Multiclass Perceptron For Implicit Emotion Classification](https://aclanthology.org/W18-6235) (Gratian & Haid, WASSA 2018)
ACL
Vachagan Gratian and Marina Haid. 2018. BrainT at IEST 2018: Fine-tuning Multiclass Perceptron For Implicit Emotion Classification. In Proceedings of the 9th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis, pages 243–247, Brussels, Belgium. Association for Computational Linguistics.
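The abstract describes the model only at a high level. For readers who want the mechanics, below is a minimal, self-contained sketch of a multi-class averaged perceptron over unigram and bigram counts, the model family the abstract names. Everything here (class and function names, the feature set reduced to unigrams and bigrams, the number of epochs) is illustrative and assumed; this is not the authors' BrainT implementation, which also used trigrams, skip-one-tetragrams and POS-tags.

```python
from collections import defaultdict

def extract_features(tokens):
    # Unigram (bag-of-words) and bigram counts only; the paper additionally
    # reports trigrams, skip-one-tetragrams and POS-tags, omitted for brevity.
    feats = defaultdict(float)
    for tok in tokens:
        feats[("uni", tok)] += 1.0
    for a, b in zip(tokens, tokens[1:]):
        feats[("bi", a, b)] += 1.0
    return feats

class AveragedPerceptron:
    """Multi-class averaged perceptron: one sparse weight vector per label,
    mistake-driven updates, and final weights averaged over all steps."""

    def __init__(self, labels):
        self.labels = list(labels)
        self.weights = {y: defaultdict(float) for y in self.labels}
        self._totals = {y: defaultdict(float) for y in self.labels}
        self._stamps = {y: defaultdict(int) for y in self.labels}
        self._t = 0  # training steps seen so far

    def predict(self, feats):
        # Highest-scoring label under the current (or averaged) weights.
        return max(
            self.labels,
            key=lambda y: sum(v * self.weights[y].get(f, 0.0)
                              for f, v in feats.items()),
        )

    def _update(self, y, f, delta):
        # Lazy averaging: bank the old weight for the steps it survived.
        self._totals[y][f] += (self._t - self._stamps[y][f]) * self.weights[y][f]
        self._stamps[y][f] = self._t
        self.weights[y][f] += delta

    def fit(self, data, epochs=5):
        for _ in range(epochs):
            for tokens, gold in data:
                self._t += 1
                feats = extract_features(tokens)
                pred = self.predict(feats)
                if pred != gold:  # standard perceptron update on mistakes
                    for f, v in feats.items():
                        self._update(gold, f, +v)
                        self._update(pred, f, -v)
        self._average()

    def _average(self):
        # Replace each weight with its average over all training steps.
        for y in self.labels:
            for f, w in self.weights[y].items():
                total = self._totals[y][f] + (self._t - self._stamps[y][f]) * w
                self.weights[y][f] = total / max(self._t, 1)

# Toy usage with made-up examples (the shared task used six emotion labels).
train = [
    (["i", "miss", "you", "so", "much"], "sad"),
    (["what", "a", "wonderful", "day"], "joy"),
]
clf = AveragedPerceptron(["joy", "sad"])
clf.fit(train, epochs=3)
print(clf.predict(extract_features(["i", "miss", "you"])))  # -> "sad"
```

Averaging is the design choice that matters here: a plain perceptron keeps only the last weight vector, which is dominated by late updates, while averaging all intermediate vectors damps oscillation and typically generalizes better on sparse text features.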