@inproceedings{parameswaran-etal-2021-berts,
    title = "{BERT}'s The Word : Sarcasm Target Detection using {BERT}",
author = "Parameswaran, Pradeesh and
Trotman, Andrew and
Liesaputra, Veronica and
Eyers, David",
editor = "Rahimi, Afshin and
Lane, William and
Zuccon, Guido",
booktitle = "Proceedings of the 19th Annual Workshop of the Australasian Language Technology Association",
month = dec,
year = "2021",
address = "Online",
publisher = "Australasian Language Technology Association",
url = "https://aclanthology.org/2021.alta-1.21/",
pages = "185--191",
    abstract = "In 2019, the Australasian Language Technology Association (ALTA) organised a shared task to detect the target of sarcastic comments posted on social media. However, there were no winners as it proved to be a difficult task. In this work, we revisit the task posed by ALTA using transformers, specifically BERT, given the recent success of transformer-based models in various NLP tasks. We conducted our experiments on two BERT models (TD-BERT and BERT-AEN). We evaluated our model on the data set provided by ALTA (Reddit) and two additional data sets: {\textquoteleft}book snippets' and {\textquoteleft}Tweets'. Our results show that our proposed method achieves a 15.2{\%} improvement over the current state-of-the-art system on the Reddit data set and a 4{\%} improvement on Tweets."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="parameswaran-etal-2021-berts">
<titleInfo>
<title>BERT’s The Word : Sarcasm Target Detection using BERT</title>
</titleInfo>
<name type="personal">
<namePart type="given">Pradeesh</namePart>
<namePart type="family">Parameswaran</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Andrew</namePart>
<namePart type="family">Trotman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Veronica</namePart>
<namePart type="family">Liesaputra</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">David</namePart>
<namePart type="family">Eyers</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 19th Annual Workshop of the Australasian Language Technology Association</title>
</titleInfo>
<name type="personal">
<namePart type="given">Afshin</namePart>
<namePart type="family">Rahimi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">William</namePart>
<namePart type="family">Lane</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Guido</namePart>
<namePart type="family">Zuccon</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Australasian Language Technology Association</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In 2019, the Australasian Language Technology Association (ALTA) organised a shared task to detect the target of sarcastic comments posted on social media. However, there were no winners as it proved to be a difficult task. In this work, we revisit the task posed by ALTA using transformers, specifically BERT, given the recent success of transformer-based models in various NLP tasks. We conducted our experiments on two BERT models (TD-BERT and BERT-AEN). We evaluated our model on the data set provided by ALTA (Reddit) and two additional data sets: ‘book snippets’ and ‘Tweets’. Our results show that our proposed method achieves a 15.2% improvement over the current state-of-the-art system on the Reddit data set and a 4% improvement on Tweets.</abstract>
<identifier type="citekey">parameswaran-etal-2021-berts</identifier>
<location>
<url>https://aclanthology.org/2021.alta-1.21/</url>
</location>
<part>
<date>2021-12</date>
<extent unit="page">
<start>185</start>
<end>191</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T BERT’s The Word : Sarcasm Target Detection using BERT
%A Parameswaran, Pradeesh
%A Trotman, Andrew
%A Liesaputra, Veronica
%A Eyers, David
%Y Rahimi, Afshin
%Y Lane, William
%Y Zuccon, Guido
%S Proceedings of the 19th Annual Workshop of the Australasian Language Technology Association
%D 2021
%8 December
%I Australasian Language Technology Association
%C Online
%F parameswaran-etal-2021-berts
%X In 2019, the Australasian Language Technology Association (ALTA) organised a shared task to detect the target of sarcastic comments posted on social media. However, there were no winners as it proved to be a difficult task. In this work, we revisit the task posed by ALTA using transformers, specifically BERT, given the recent success of transformer-based models in various NLP tasks. We conducted our experiments on two BERT models (TD-BERT and BERT-AEN). We evaluated our model on the data set provided by ALTA (Reddit) and two additional data sets: ‘book snippets’ and ‘Tweets’. Our results show that our proposed method achieves a 15.2% improvement over the current state-of-the-art system on the Reddit data set and a 4% improvement on Tweets.
%U https://aclanthology.org/2021.alta-1.21/
%P 185-191
Markdown (Informal)
[BERT’s The Word : Sarcasm Target Detection using BERT](https://aclanthology.org/2021.alta-1.21/) (Parameswaran et al., ALTA 2021)
ACL
Pradeesh Parameswaran, Andrew Trotman, Veronica Liesaputra, and David Eyers. 2021. BERT’s The Word : Sarcasm Target Detection using BERT. In Proceedings of the 19th Annual Workshop of the Australasian Language Technology Association, pages 185–191, Online. Australasian Language Technology Association.