@inproceedings{jha-mamidi:2017:NLPandCSS,
  author    = {Jha, Akshita and Mamidi, Radhika},
  title     = {When Does a Compliment Become Sexist? Analysis and Classification of Ambivalent Sexism Using {Twitter} Data},
  booktitle = {Proceedings of the Second Workshop on {NLP} and Computational Social Science},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {7--16},
  abstract  = {Sexism is prevalent in today's society, both offline and online, and poses a
	credible threat to social equality with respect to gender. According to
	ambivalent sexism theory (Glick and Fiske, 1996), it comes in two forms:
	Hostile and Benevolent. While hostile sexism is characterized by an explicitly
	negative attitude, benevolent sexism is more subtle. Previous works on
	computationally detecting sexism present online are restricted to identifying
	the hostile form. Our objective is to investigate the less pronounced form of
	sexism demonstrated online. We achieve this by creating and analyzing a dataset
	of tweets that exhibit benevolent sexism. By using Support Vector Machines
	(SVM), sequence-to-sequence models and FastText classifier, we classify tweets
	into `Hostile', `Benevolent' or `Others' class depending on the
	kind of sexism they exhibit. We have been able to achieve an F1-score of 87.22\%
	using FastText classifier. Our work helps analyze and understand the much
	prevalent ambivalent sexism in social media.},
  doi       = {10.18653/v1/W17-2902},
  url       = {http://www.aclweb.org/anthology/W17-2902},
}

