@InProceedings{liu-qiu-huang:2017:Long,
  author    = {Liu, Pengfei and Qiu, Xipeng and Huang, Xuanjing},
  title     = {Adversarial Multi-task Learning for Text Classification},
  booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = {July},
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {1--10},
  abstract  = {Neural network models have shown promise for multi-task
	learning, which focuses on learning shared layers to extract common,
	task-invariant features. However, in most existing approaches, the extracted
	shared features are prone to be contaminated by task-specific features or
	noise brought by other tasks.
	In this paper, we propose an adversarial multi-task learning framework
	that prevents the shared and private latent feature spaces from interfering
	with each other.
	We conduct extensive experiments on 16 different text classification tasks,
	which demonstrate the benefits of our approach. In addition, we show that the
	shared knowledge learned by our proposed model can be regarded as off-the-shelf
	knowledge and easily transferred to new tasks.
	The datasets of all 16 tasks are publicly available at
	http://nlp.fudan.edu.cn/data/.},
  url       = {http://aclweb.org/anthology/P17-1001}
}