BibTeX
@inproceedings{matero-schwartz-2020-autoregressive,
    title = "Autoregressive Affective Language Forecasting: A Self-Supervised Task",
    author = "Matero, Matthew and
      Schwartz, H. Andrew",
    editor = "Scott, Donia and
      Bel, Nuria and
      Zong, Chengqing",
    booktitle = "Proceedings of the 28th International Conference on Computational Linguistics",
    month = dec,
    year = "2020",
    address = "Barcelona, Spain (Online)",
    publisher = "International Committee on Computational Linguistics",
    url = "https://aclanthology.org/2020.coling-main.261",
    doi = "10.18653/v1/2020.coling-main.261",
    pages = "2913--2923",
abstract = "Human natural language is mentioned at a specific point in time while human emotions change over time. While much work has established a strong link between language use and emotional states, few have attempted to model emotional language in time. Here, we introduce the task of \textit{affective language forecasting} {--} predicting future change in language based on past changes of language, a task with real-world applications such as treating mental health or forecasting trends in consumer confidence. We establish some of the fundamental autoregressive characteristics of the task (necessary history size, static versus dynamic length, varying time-step resolutions) and then build on popular sequence models for \textit{words} to instead model sequences of \textit{language-based emotion in time}. Over a novel Twitter dataset of 1,900 users and weekly + daily scores for 6 emotions and 2 additional linguistic attributes, we find a novel dual-sequence GRU model with decayed hidden states achieves best results ($r = .66$) significantly out-predicting, e.g., a moving averaging based on the past time-steps ($r = .49$). We make our anonymized dataset as well as task setup and evaluation code available for others to build on.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="matero-schwartz-2020-autoregressive">
    <titleInfo>
      <title>Autoregressive Affective Language Forecasting: A Self-Supervised Task</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Matthew</namePart>
      <namePart type="family">Matero</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">H</namePart>
      <namePart type="given">Andrew</namePart>
      <namePart type="family">Schwartz</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 28th International Conference on Computational Linguistics</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Donia</namePart>
        <namePart type="family">Scott</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Nuria</namePart>
        <namePart type="family">Bel</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Chengqing</namePart>
        <namePart type="family">Zong</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>International Committee on Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Barcelona, Spain (Online)</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Human natural language is mentioned at a specific point in time while human emotions change over time. While much work has established a strong link between language use and emotional states, few have attempted to model emotional language in time. Here, we introduce the task of affective language forecasting – predicting future change in language based on past changes of language, a task with real-world applications such as treating mental health or forecasting trends in consumer confidence. We establish some of the fundamental autoregressive characteristics of the task (necessary history size, static versus dynamic length, varying time-step resolutions) and then build on popular sequence models for words to instead model sequences of language-based emotion in time. Over a novel Twitter dataset of 1,900 users and weekly + daily scores for 6 emotions and 2 additional linguistic attributes, we find a novel dual-sequence GRU model with decayed hidden states achieves best results (r = .66) significantly out-predicting, e.g., a moving average based on the past time-steps (r = .49). We make our anonymized dataset as well as task setup and evaluation code available for others to build on.</abstract>
<identifier type="citekey">matero-schwartz-2020-autoregressive</identifier>
<identifier type="doi">10.18653/v1/2020.coling-main.261</identifier>
<location>
<url>https://aclanthology.org/2020.coling-main.261</url>
</location>
<part>
<date>2020-12</date>
<extent unit="page">
<start>2913</start>
<end>2923</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Autoregressive Affective Language Forecasting: A Self-Supervised Task
%A Matero, Matthew
%A Schwartz, H. Andrew
%Y Scott, Donia
%Y Bel, Nuria
%Y Zong, Chengqing
%S Proceedings of the 28th International Conference on Computational Linguistics
%D 2020
%8 December
%I International Committee on Computational Linguistics
%C Barcelona, Spain (Online)
%F matero-schwartz-2020-autoregressive
%X Human natural language is mentioned at a specific point in time while human emotions change over time. While much work has established a strong link between language use and emotional states, few have attempted to model emotional language in time. Here, we introduce the task of affective language forecasting – predicting future change in language based on past changes of language, a task with real-world applications such as treating mental health or forecasting trends in consumer confidence. We establish some of the fundamental autoregressive characteristics of the task (necessary history size, static versus dynamic length, varying time-step resolutions) and then build on popular sequence models for words to instead model sequences of language-based emotion in time. Over a novel Twitter dataset of 1,900 users and weekly + daily scores for 6 emotions and 2 additional linguistic attributes, we find a novel dual-sequence GRU model with decayed hidden states achieves best results (r = .66) significantly out-predicting, e.g., a moving average based on the past time-steps (r = .49). We make our anonymized dataset as well as task setup and evaluation code available for others to build on.
%R 10.18653/v1/2020.coling-main.261
%U https://aclanthology.org/2020.coling-main.261
%U https://doi.org/10.18653/v1/2020.coling-main.261
%P 2913-2923
Markdown (Informal)
[Autoregressive Affective Language Forecasting: A Self-Supervised Task](https://aclanthology.org/2020.coling-main.261) (Matero & Schwartz, COLING 2020)
ACL
Matthew Matero and H. Andrew Schwartz. 2020. Autoregressive Affective Language Forecasting: A Self-Supervised Task. In Proceedings of the 28th International Conference on Computational Linguistics, pages 2913–2923, Barcelona, Spain (Online). International Committee on Computational Linguistics.