@inproceedings{jaiswal-2020-neural,
title = "Neural Sarcasm Detection using Conversation Context",
author = "Jaiswal, Nikhil",
editor = "Klebanov, Beata Beigman and
Shutova, Ekaterina and
Lichtenstein, Patricia and
Muresan, Smaranda and
Wee, Chee and
Feldman, Anna and
Ghosh, Debanjan",
booktitle = "Proceedings of the Second Workshop on Figurative Language Processing",
month = jul,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.figlang-1.11",
doi = "10.18653/v1/2020.figlang-1.11",
pages = "77--82",
abstract = "Social media platforms and discussion forums such as Reddit, Twitter, etc. are filled with figurative languages. Sarcasm is one such category of figurative language whose presence in a conversation makes language understanding a challenging task. In this paper, we present a deep neural architecture for sarcasm detection. We investigate various pre-trained language representation models (PLRMs) like BERT, RoBERTa, etc. and fine-tune it on the Twitter dataset. We experiment with a variety of PLRMs either on the twitter utterance in isolation or utilizing the contextual information along with the utterance. Our findings indicate that by taking into consideration the previous three most recent utterances, the model is more accurately able to classify a conversation as being sarcastic or not. Our best performing ensemble model achieves an overall F1 score of 0.790, which ranks us second on the leaderboard of the Sarcasm Shared Task 2020.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="jaiswal-2020-neural">
<titleInfo>
<title>Neural Sarcasm Detection using Conversation Context</title>
</titleInfo>
<name type="personal">
<namePart type="given">Nikhil</namePart>
<namePart type="family">Jaiswal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Second Workshop on Figurative Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Beata</namePart>
<namePart type="given">Beigman</namePart>
<namePart type="family">Klebanov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ekaterina</namePart>
<namePart type="family">Shutova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Patricia</namePart>
<namePart type="family">Lichtenstein</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Smaranda</namePart>
<namePart type="family">Muresan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chee</namePart>
<namePart type="family">Wee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="family">Feldman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Debanjan</namePart>
<namePart type="family">Ghosh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Social media platforms and discussion forums such as Reddit, Twitter, etc. are filled with figurative language. Sarcasm is one such category of figurative language whose presence in a conversation makes language understanding a challenging task. In this paper, we present a deep neural architecture for sarcasm detection. We investigate various pre-trained language representation models (PLRMs) such as BERT and RoBERTa and fine-tune them on the Twitter dataset. We experiment with a variety of PLRMs, using either the Twitter utterance in isolation or the contextual information along with the utterance. Our findings indicate that taking the three most recent utterances into account enables the model to more accurately classify a conversation as sarcastic or not. Our best-performing ensemble model achieves an overall F1 score of 0.790, which ranks us second on the leaderboard of the Sarcasm Shared Task 2020.</abstract>
<identifier type="citekey">jaiswal-2020-neural</identifier>
<identifier type="doi">10.18653/v1/2020.figlang-1.11</identifier>
<location>
<url>https://aclanthology.org/2020.figlang-1.11</url>
</location>
<part>
<date>2020-07</date>
<extent unit="page">
<start>77</start>
<end>82</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Neural Sarcasm Detection using Conversation Context
%A Jaiswal, Nikhil
%Y Klebanov, Beata Beigman
%Y Shutova, Ekaterina
%Y Lichtenstein, Patricia
%Y Muresan, Smaranda
%Y Wee, Chee
%Y Feldman, Anna
%Y Ghosh, Debanjan
%S Proceedings of the Second Workshop on Figurative Language Processing
%D 2020
%8 July
%I Association for Computational Linguistics
%C Online
%F jaiswal-2020-neural
%X Social media platforms and discussion forums such as Reddit, Twitter, etc. are filled with figurative language. Sarcasm is one such category of figurative language whose presence in a conversation makes language understanding a challenging task. In this paper, we present a deep neural architecture for sarcasm detection. We investigate various pre-trained language representation models (PLRMs) such as BERT and RoBERTa and fine-tune them on the Twitter dataset. We experiment with a variety of PLRMs, using either the Twitter utterance in isolation or the contextual information along with the utterance. Our findings indicate that taking the three most recent utterances into account enables the model to more accurately classify a conversation as sarcastic or not. Our best-performing ensemble model achieves an overall F1 score of 0.790, which ranks us second on the leaderboard of the Sarcasm Shared Task 2020.
%R 10.18653/v1/2020.figlang-1.11
%U https://aclanthology.org/2020.figlang-1.11
%U https://doi.org/10.18653/v1/2020.figlang-1.11
%P 77-82
Markdown (Informal)
[Neural Sarcasm Detection using Conversation Context](https://aclanthology.org/2020.figlang-1.11) (Jaiswal, Fig-Lang 2020)
ACL
Nikhil Jaiswal. 2020. Neural Sarcasm Detection using Conversation Context. In Proceedings of the Second Workshop on Figurative Language Processing, pages 77–82, Online. Association for Computational Linguistics.
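
The abstract above describes fine-tuning pre-trained transformers (e.g., BERT, RoBERTa) on a target tweet together with its three most recent context utterances. Below is a minimal sketch of that kind of setup, assuming the Hugging Face `transformers` and `torch` packages; the model choice, hyperparameters, and helper names are illustrative and do not reproduce the authors' actual code.

```python
# Illustrative sketch (not the authors' released code): encode the three
# most recent context utterances together with the response tweet and
# score it with a pre-trained sequence-classification model.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

MODEL_NAME = "roberta-base"  # the paper also experiments with BERT and other PLRMs

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, num_labels=2)

def encode_example(context_utterances, response, max_context=3):
    """Join the `max_context` most recent context turns and pair them
    with the response so the model sees the conversation history."""
    context = " ".join(context_utterances[-max_context:])
    return tokenizer(
        context,
        response,
        truncation=True,
        padding="max_length",
        max_length=128,
        return_tensors="pt",
    )

# Single made-up example; actual training would iterate over the Sarcasm
# Shared Task 2020 Twitter data with a loss function and optimizer.
inputs = encode_example(
    ["I love waiting in line.", "Me too.", "Best part of my day."],
    "Oh sure, nothing beats a two-hour queue.",
)
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.softmax(dim=-1))  # probabilities for [not sarcastic, sarcastic]
```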