@inproceedings{goel-sharma:2019:S19-2,
  author    = {Goel, Bharti and Sharma, Ravi},
  title     = {{USF} at {SemEval}-2019 Task 6: Offensive Language Detection Using {LSTM} With Word Embeddings},
  booktitle = {Proceedings of the 13th International Workshop on Semantic Evaluation},
  month     = jun,
  year      = {2019},
  address   = {Minneapolis, Minnesota, USA},
  publisher = {Association for Computational Linguistics},
  pages     = {796--800},
  abstract  = {In this paper, we present a system description for the SemEval-2019 Task 6 submitted by our team. For the task, our system takes tweet as an input and determine if the tweet is offensive or non-offensive (Sub-task A). In case a tweet is offensive, our system identifies if a tweet is targeted (insult or threat) or non-targeted like swearing (Sub-task B). In targeted tweets, our system identifies the target as an individual or group (Sub-task C). We used data pre-processing techniques like splitting hashtags into words, removing special characters, stop-word removal, stemming, lemmatization, capitalization, and offensive word dictionary. Later, we used keras tokenizer and word embeddings for feature extraction. For classification, we used the LSTM (Long short-term memory) model of keras framework. Our accuracy scores for Sub-task A, B and C are \textit{0.8128}, \textit{0.8167} and \textit{0.3662} respectively. Our results indicate that fine-grained classification to identify offense target was difficult for the system. Lastly, in the future scope section, we will discuss the ways to improve system performance.},
  doi       = {10.18653/v1/S19-2139},
  url       = {https://aclanthology.org/S19-2139},
}

