@InProceedings{xu-yang:2017:Long,
  author    = {Xu, Ruochen  and  Yang, Yiming},
  title     = {Cross-lingual Distillation for Text Classification},
  booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = {July},
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {1415--1425},
  abstract  = {Cross-lingual text classification (CLTC) is the task of classifying documents
	written in different languages into the same taxonomy of categories.
	This paper presents a novel approach to CLTC that builds on model distillation,
	which adapts and extends a framework originally proposed for model compression.
	Using soft probabilistic predictions for the documents in a label-rich language
	as the (induced) supervisory labels in a parallel corpus of documents, we train
	classifiers successfully for new languages in which labeled training data are
	not available. An adversarial feature adaptation technique is also applied
	during the model training to reduce distribution mismatch. We conducted
	experiments on two benchmark CLTC datasets, treating English as the source
	language and German, French, Japanese and Chinese as the unlabeled target
	languages. The proposed approach had advantageous or comparable performance
	relative to other state-of-the-art methods.},
  url       = {http://aclweb.org/anthology/P17-1130}
}

