@InProceedings{kerinec-braud-sgaard:2018:BlackboxNLP,
  author    = {Kerinec, Emma  and  Braud, Chloé  and  Søgaard, Anders},
  title     = {When does deep multi-task learning work for loosely related document classification tasks?},
  booktitle = {Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {1--8},
  abstract  = {This work aims to contribute to our understanding of {\em when}~multi-task learning through parameter sharing in deep neural networks leads to improvements over single-task learning. We focus on the setting of learning from {\em loosely related}~tasks, for which no theoretical guarantees exist. We therefore approach the question empirically, studying which properties of datasets and single-task learning characteristics correlate with improvements from multi-task learning. We are the first to study this in a text classification setting and across more than 500 different task pairs.},
  url       = {http://www.aclweb.org/anthology/W18-5401}
}
