@inproceedings{dong-etal-2020-transformer,
title = "Transformer-based Context-aware Sarcasm Detection in Conversation Threads from Social Media",
author = "Dong, Xiangjue and
Li, Changmao and
Choi, Jinho D.",
editor = "Klebanov, Beata Beigman and
Shutova, Ekaterina and
Lichtenstein, Patricia and
Muresan, Smaranda and
Wee, Chee and
Feldman, Anna and
Ghosh, Debanjan",
booktitle = "Proceedings of the Second Workshop on Figurative Language Processing",
month = jul,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.figlang-1.38",
doi = "10.18653/v1/2020.figlang-1.38",
pages = "276--280",
abstract = "We present a transformer-based sarcasm detection model that accounts for the context from the entire conversation thread for more robust predictions. Our model uses deep transformer layers to perform multi-head attentions among the target utterance and the relevant context in the thread. The context-aware models are evaluated on two datasets from social media, Twitter and Reddit, and show 3.1{\%} and 7.0{\%} improvements over their baselines. Our best models give the F1-scores of 79.0{\%} and 75.0{\%} for the Twitter and Reddit datasets respectively, becoming one of the highest performing systems among 36 participants in this shared task.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="dong-etal-2020-transformer">
<titleInfo>
<title>Transformer-based Context-aware Sarcasm Detection in Conversation Threads from Social Media</title>
</titleInfo>
<name type="personal">
<namePart type="given">Xiangjue</namePart>
<namePart type="family">Dong</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Changmao</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jinho</namePart>
<namePart type="given">D.</namePart>
<namePart type="family">Choi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Second Workshop on Figurative Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Beata</namePart>
<namePart type="given">Beigman</namePart>
<namePart type="family">Klebanov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ekaterina</namePart>
<namePart type="family">Shutova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Patricia</namePart>
<namePart type="family">Lichtenstein</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Smaranda</namePart>
<namePart type="family">Muresan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chee</namePart>
<namePart type="family">Wee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="family">Feldman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Debanjan</namePart>
<namePart type="family">Ghosh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We present a transformer-based sarcasm detection model that accounts for the context from the entire conversation thread for more robust predictions. Our model uses deep transformer layers to perform multi-head attentions among the target utterance and the relevant context in the thread. The context-aware models are evaluated on two datasets from social media, Twitter and Reddit, and show 3.1% and 7.0% improvements over their baselines. Our best models give the F1-scores of 79.0% and 75.0% for the Twitter and Reddit datasets respectively, becoming one of the highest performing systems among 36 participants in this shared task.</abstract>
<identifier type="citekey">dong-etal-2020-transformer</identifier>
<identifier type="doi">10.18653/v1/2020.figlang-1.38</identifier>
<location>
<url>https://aclanthology.org/2020.figlang-1.38</url>
</location>
<part>
<date>2020-07</date>
<extent unit="page">
<start>276</start>
<end>280</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Transformer-based Context-aware Sarcasm Detection in Conversation Threads from Social Media
%A Dong, Xiangjue
%A Li, Changmao
%A Choi, Jinho D.
%Y Klebanov, Beata Beigman
%Y Shutova, Ekaterina
%Y Lichtenstein, Patricia
%Y Muresan, Smaranda
%Y Wee, Chee
%Y Feldman, Anna
%Y Ghosh, Debanjan
%S Proceedings of the Second Workshop on Figurative Language Processing
%D 2020
%8 July
%I Association for Computational Linguistics
%C Online
%F dong-etal-2020-transformer
%X We present a transformer-based sarcasm detection model that accounts for the context from the entire conversation thread for more robust predictions. Our model uses deep transformer layers to perform multi-head attentions among the target utterance and the relevant context in the thread. The context-aware models are evaluated on two datasets from social media, Twitter and Reddit, and show 3.1% and 7.0% improvements over their baselines. Our best models give the F1-scores of 79.0% and 75.0% for the Twitter and Reddit datasets respectively, becoming one of the highest performing systems among 36 participants in this shared task.
%R 10.18653/v1/2020.figlang-1.38
%U https://aclanthology.org/2020.figlang-1.38
%U https://doi.org/10.18653/v1/2020.figlang-1.38
%P 276-280
Markdown (Informal)
[Transformer-based Context-aware Sarcasm Detection in Conversation Threads from Social Media](https://aclanthology.org/2020.figlang-1.38) (Dong et al., Fig-Lang 2020)
ACL