@inproceedings{yang-mukherjee-zhang:2016:COLING,
  author    = {Yang, Fan and Mukherjee, Arjun and Zhang, Yifan},
  title     = {Leveraging Multiple Domains for Sentiment Classification},
  booktitle = {Proceedings of {COLING} 2016, the 26th International Conference on Computational Linguistics: Technical Papers},
  month     = dec,
  year      = {2016},
  address   = {Osaka, Japan},
  publisher = {The COLING 2016 Organizing Committee},
  pages     = {2978--2988},
  abstract  = {Sentiment classification becomes more and more important with the
    rapid growth of user generated content. However, sentiment classification
    task usually comes with two challenges: first, sentiment classification is
    highly domain-dependent and training sentiment classifier for every domain
    is inefficient and often impractical; second, since the quantity of labeled
    data is important for assessing the quality of classifier, it is hard to
    evaluate classifiers when labeled data is limited for certain domains. To
    address the challenges mentioned above, we focus on learning high-level
    features that are able to generalize across domains, so a global classifier
    can benefit with a simple combination of documents from multiple domains.
    In this paper, the proposed model incorporates both sentiment polarity and
    unlabeled data from multiple domains and learns new feature
    representations. Our model doesn't require labels from every domain, which
    means the learned feature representation can be generalized for sentiment
    domain adaptation. In addition, the learned feature representation can be
    used as classifier since our model defines the meaning of feature value and
    arranges high-level features in a prefixed order, so it is not necessary to
    train another classifier on top of the new features. Empirical evaluations
    demonstrate our model outperforms baselines and yields competitive results
    to other state-of-the-art works on benchmark datasets.},
  url       = {http://aclweb.org/anthology/C16-1280},
}

