@inproceedings{min-EtAl:2017:I17-1,
  author    = {Min, Bonan and Jiang, Zhuolin and Freedman, Marjorie and Weischedel, Ralph},
  title     = {Learning Transferable Representation for Bilingual Relation Extraction via Convolutional Neural Networks},
  booktitle = {Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers)},
  month     = nov,
  year      = {2017},
  address   = {Taipei, Taiwan},
  publisher = {Asian Federation of Natural Language Processing},
  pages     = {674--684},
  abstract  = {Typically, relation extraction models are trained to extract instances of a
	relation ontology using only training data from a single language. However, the
	concepts represented by the relation ontology (e.g. ResidesIn, EmployeeOf) are
	language independent. The numbers of annotated examples available for a given
	ontology vary between languages. For example, there are far fewer annotated
	examples in Spanish and Japanese than English and Chinese. Furthermore, using
	only language-specific training data results in the need to manually annotate
	equivalently large amounts of training for each new language a system
	encounters. We propose a deep neural network to learn transferable,
	discriminative bilingual representation. Experiments on the ACE 2005
	multilingual training corpus demonstrate that the joint training process
	results in significant improvement in relation classification performance over
	the monolingual counterparts. The learnt representation is discriminative and
	transferable between languages. When using 10\% (25K English words, or 30K
	Chinese characters) of the training data, our approach results in doubling F1
	compared to a monolingual baseline. We achieve comparable performance to the
	monolingual system trained with 250K English words (or 300K Chinese characters)
	with 50\% of training data.},
  url       = {http://www.aclweb.org/anthology/I17-1068},
}

