@inproceedings{he-EtAl:2017:Long2,
  author    = {He, Ruidan and Lee, Wee Sun and Ng, Hwee Tou and Dahlmeier, Daniel},
  title     = {An Unsupervised Neural Attention Model for Aspect Extraction},
  booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {388--397},
  doi       = {10.18653/v1/P17-1036},
  url       = {http://aclweb.org/anthology/P17-1036},
  abstract  = {Aspect extraction is an important and challenging task in aspect-based
    sentiment analysis. Existing works tend to apply variants of topic models on
    this task. While fairly successful, these methods usually do not produce highly
    coherent aspects. In this paper, we present a novel neural approach with the
    aim of discovering coherent aspects. The model improves coherence by exploiting
    the distribution of word co-occurrences through the use of neural word
    embeddings. Unlike topic models which typically assume independently generated
    words, word embedding models encourage words that appear in similar contexts to
    be located close to each other in the embedding space. In addition, we use an
    attention mechanism to de-emphasize irrelevant words during training, further
    improving the coherence of aspects. Experimental results on real-life datasets
    demonstrate that our approach discovers more meaningful and coherent aspects,
    and substantially outperforms baseline methods on several evaluation tasks.},
}

