@inproceedings{avvaru-etal-2020-detecting,
title = "{D}etecting {S}arcasm in {C}onversation {C}ontext {U}sing {T}ransformer-{B}ased {M}odels",
author = "Avvaru, Adithya and
Vobilisetty, Sanath and
Mamidi, Radhika",
editor = "Klebanov, Beata Beigman and
Shutova, Ekaterina and
Lichtenstein, Patricia and
Muresan, Smaranda and
Wee, Chee and
Feldman, Anna and
Ghosh, Debanjan",
booktitle = "Proceedings of the Second Workshop on Figurative Language Processing",
month = jul,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.figlang-1.15",
doi = "10.18653/v1/2020.figlang-1.15",
pages = "98--103",
abstract = "Sarcasm detection, regarded as one of the sub-problems of sentiment analysis, is a very typical task because the introduction of sarcastic words can flip the sentiment of the sentence itself. To date, many research works revolve around detecting sarcasm in one single sentence and there is very limited research to detect sarcasm resulting from multiple sentences. Current models used Long Short Term Memory (LSTM) variants with or without attention to detect sarcasm in conversations. We showed that the models using state-of-the-art Bidirectional Encoder Representations from Transformers (BERT), to capture syntactic and semantic information across conversation sentences, performed better than the current models. Based on the data analysis, we estimated that the number of sentences in the conversation that can contribute to the sarcasm and the results agrees to this estimation. We also perform a comparative study of our different versions of BERT-based model with other variants of LSTM model and XLNet (both using the estimated number of conversation sentences) and find out that BERT-based models outperformed them.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="avvaru-etal-2020-detecting">
<titleInfo>
<title>Detecting Sarcasm in Conversation Context Using Transformer-Based Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Adithya</namePart>
<namePart type="family">Avvaru</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sanath</namePart>
<namePart type="family">Vobilisetty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Radhika</namePart>
<namePart type="family">Mamidi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Second Workshop on Figurative Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Beata</namePart>
<namePart type="given">Beigman</namePart>
<namePart type="family">Klebanov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ekaterina</namePart>
<namePart type="family">Shutova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Patricia</namePart>
<namePart type="family">Lichtenstein</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Smaranda</namePart>
<namePart type="family">Muresan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chee</namePart>
<namePart type="family">Wee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="family">Feldman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Debanjan</namePart>
<namePart type="family">Ghosh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Sarcasm detection, regarded as one of the sub-problems of sentiment analysis, is a particularly tricky task because the introduction of sarcastic words can flip the sentiment of the sentence itself. To date, most research has revolved around detecting sarcasm in a single sentence, and there is very limited work on detecting sarcasm that arises from multiple sentences. Current models use Long Short-Term Memory (LSTM) variants, with or without attention, to detect sarcasm in conversations. We show that models using state-of-the-art Bidirectional Encoder Representations from Transformers (BERT) to capture syntactic and semantic information across conversation sentences perform better than these models. Based on data analysis, we estimate the number of sentences in the conversation that can contribute to the sarcasm, and our results agree with this estimate. We also perform a comparative study of different versions of our BERT-based model against LSTM variants and XLNet (both using the estimated number of conversation sentences) and find that the BERT-based models outperform them.</abstract>
<identifier type="citekey">avvaru-etal-2020-detecting</identifier>
<identifier type="doi">10.18653/v1/2020.figlang-1.15</identifier>
<location>
<url>https://aclanthology.org/2020.figlang-1.15</url>
</location>
<part>
<date>2020-07</date>
<extent unit="page">
<start>98</start>
<end>103</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Detecting Sarcasm in Conversation Context Using Transformer-Based Models
%A Avvaru, Adithya
%A Vobilisetty, Sanath
%A Mamidi, Radhika
%Y Klebanov, Beata Beigman
%Y Shutova, Ekaterina
%Y Lichtenstein, Patricia
%Y Muresan, Smaranda
%Y Wee, Chee
%Y Feldman, Anna
%Y Ghosh, Debanjan
%S Proceedings of the Second Workshop on Figurative Language Processing
%D 2020
%8 July
%I Association for Computational Linguistics
%C Online
%F avvaru-etal-2020-detecting
%X Sarcasm detection, regarded as one of the sub-problems of sentiment analysis, is a particularly tricky task because the introduction of sarcastic words can flip the sentiment of the sentence itself. To date, most research has revolved around detecting sarcasm in a single sentence, and there is very limited work on detecting sarcasm that arises from multiple sentences. Current models use Long Short-Term Memory (LSTM) variants, with or without attention, to detect sarcasm in conversations. We show that models using state-of-the-art Bidirectional Encoder Representations from Transformers (BERT) to capture syntactic and semantic information across conversation sentences perform better than these models. Based on data analysis, we estimate the number of sentences in the conversation that can contribute to the sarcasm, and our results agree with this estimate. We also perform a comparative study of different versions of our BERT-based model against LSTM variants and XLNet (both using the estimated number of conversation sentences) and find that the BERT-based models outperform them.
%R 10.18653/v1/2020.figlang-1.15
%U https://aclanthology.org/2020.figlang-1.15
%U https://doi.org/10.18653/v1/2020.figlang-1.15
%P 98-103
Markdown (Informal)
[Detecting Sarcasm in Conversation Context Using Transformer-Based Models](https://aclanthology.org/2020.figlang-1.15) (Avvaru et al., Fig-Lang 2020)
ACL
Adithya Avvaru, Sanath Vobilisetty, and Radhika Mamidi. 2020. Detecting Sarcasm in Conversation Context Using Transformer-Based Models. In Proceedings of the Second Workshop on Figurative Language Processing, pages 98–103, Online. Association for Computational Linguistics.
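
The abstract describes feeding conversation context together with the response into BERT and limiting context to an estimated number of contributing sentences. As a rough illustration only (not the authors' released code), here is a minimal sketch assuming the Hugging Face `transformers` library; the `max_turns` window, the sentence-pair encoding, and the label layout are hypothetical choices, and the classifier head is untrained until fine-tuned on a sarcasm dataset.

```python
# Illustrative sketch of BERT-based sarcasm detection with conversation context.
# Not the paper's implementation; assumes Hugging Face transformers and PyTorch.
import torch
from transformers import BertTokenizer, BertForSequenceClassification

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
# Two labels: 0 = not sarcastic, 1 = sarcastic (hypothetical layout).
# The classification head is randomly initialized; fine-tune before use.
model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
model.eval()

def score_response(context_turns, response, max_turns=3):
    # Keep only the last `max_turns` turns, mirroring the paper's idea that a
    # limited number of preceding conversation sentences contributes to the sarcasm.
    context = " ".join(context_turns[-max_turns:])
    # Encode context and response as a sentence pair so BERT can attend across both.
    inputs = tokenizer(context, response, truncation=True,
                       max_length=256, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    probs = torch.softmax(logits, dim=-1)
    return probs[0, 1].item()  # probability of the "sarcastic" label

print(score_response(["What a great day.", "The picnic got rained out."],
                     "Yeah, perfect weather."))
```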