@inproceedings{levy-EtAl:2017:CoNLL,
  author    = {Levy, Omer and Seo, Minjoon and Choi, Eunsol and Zettlemoyer, Luke},
  title     = {Zero-Shot Relation Extraction via Reading Comprehension},
  booktitle = {Proceedings of the 21st Conference on Computational Natural Language Learning ({CoNLL} 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {333--342},
  doi       = {10.18653/v1/K17-1034},
  url       = {https://aclanthology.org/K17-1034},
  abstract  = {We show that relation extraction can be reduced to answering simple reading
               comprehension questions, by associating one or more natural-language questions
               with each relation slot. This reduction has several advantages: we can (1)
               learn relation-extraction models by extending recent neural
               reading-comprehension techniques, (2) build very large training sets for those
               models by combining relation-specific crowd-sourced questions with distant
               supervision, and even (3) do zero-shot learning by extracting new relation
               types that are only specified at test-time, for which we have no labeled
               training examples. Experiments on a Wikipedia slot-filling task demonstrate
               that the approach can generalize to new questions for known relation types with
               high accuracy, and that zero-shot generalization to unseen relation types is
               possible, at lower accuracy levels, setting the bar for future work on this
               task.},
}

