@inproceedings{kim-EtAl:2017:I17-2,
  author    = {Kim, Kangil and Shin, Jong-Hun and Na, Seung-Hoon and Jung, SangKeun},
  title     = {Concept Equalization to Guide Correct Training of Neural Machine Translation},
  booktitle = {Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 2: Short Papers)},
  month     = nov,
  year      = {2017},
  address   = {Taipei, Taiwan},
  publisher = {Asian Federation of Natural Language Processing},
  pages     = {302--307},
  abstract  = {Neural machine translation decoders are usually conditional language models to
	sequentially generate words
	for target sentences.
	This approach is limited to find the best word composition and requires help of
	explicit methods as beam search.
	To help learning correct compositional mechanisms in NMTs,
	we propose concept equalization using direct mapping distributed
	representations of source and target sentences.
	In a translation experiment from English to French, the concept equalization
	significantly improved translation quality by 3.00 BLEU points
	compared to a state-of-the-art NMT model.},
  url       = {http://www.aclweb.org/anthology/I17-2051},
}

