@InProceedings{kim-stratos-kim:2017:Long1,
  author    = {Kim, Young-Bum  and  Stratos, Karl  and  Kim, Dongchan},
  title     = {Domain Attention with an Ensemble of Experts},
  booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {643--653},
  abstract  = {An important problem in domain adaptation is to quickly generalize to a new
    domain with limited supervision given K existing domains. One approach is to
    retrain a global model across all K + 1 domains using standard techniques, for
    instance Daum{\'e} III (2009). However, it is desirable to adapt without having
    to re-estimate a global model from scratch each time a new domain with
    potentially new intents and slots is added. We describe a solution based on
    attending an ensemble of domain experts. We assume K domain specific intent and
    slot models trained on respective domains. When given domain K + 1, our model
    uses a weighted combination of the K domain experts' feedback along with its
    own opinion to make predictions on the new domain. In experiments,
    the model significantly outperforms baselines that do not use domain adaptation
    and also performs better than the full retraining approach.},
  doi       = {10.18653/v1/P17-1060},
  url       = {http://aclweb.org/anthology/P17-1060}
}

