@inproceedings{zhang-lease-wallace:2017:Short,
  author    = {Zhang, Ye and Lease, Matthew and Wallace, Byron C.},
  title     = {Exploiting Domain Knowledge via Grouped Weight Sharing with Application to Text Categorization},
  booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
  month     = jul,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {155--160},
  doi       = {10.18653/v1/P17-2024},
  url       = {https://aclanthology.org/P17-2024},
  abstract  = {A fundamental advantage of neural models for NLP is their
    ability to learn representations from scratch. However, in practice
    this often means ignoring existing external linguistic resources,
    e.g., WordNet or domain specific ontologies such as the Unified
    Medical Language System (UMLS). We propose a general, novel method
    for exploiting such resources via weight sharing. Prior work on
    weight sharing in neural networks has considered it largely as a
    means of model compression. In contrast, we treat weight sharing as
    a flexible mechanism for incorporating prior knowledge into neural
    models. We show that this approach consistently yields improved
    performance on classification tasks compared to baseline strategies
    that do not exploit weight sharing.},
}

