@inproceedings{long-EtAl:2017:WAT2017,
  author    = {Long, Zi and Kimura, Ryuichiro and Utsuro, Takehito and Mitsuhashi, Tomoharu and Yamamoto, Mikio},
  title     = {Patent {NMT} Integrated with Large Vocabulary Phrase Translation by {SMT} at {WAT} 2017},
  booktitle = {Proceedings of the 4th Workshop on Asian Translation ({WAT2017})},
  month     = nov,
  year      = {2017},
  address   = {Taipei, Taiwan},
  publisher = {Asian Federation of Natural Language Processing},
  pages     = {110--118},
  abstract  = {Neural machine translation (NMT) cannot handle a larger vocabulary
    because the training complexity and decoding complexity proportionally
    increase with the number of target words. This problem becomes even
    more serious when translating patent documents, which contain many
    technical terms that are observed infrequently. Long et al. (2017)
    proposed to select phrases that contain out-of-vocabulary words using
    the statistical approach of branching entropy. The selected phrases
    are then replaced with tokens during training and post-translated by
    the phrase translation table of SMT. In this paper, we apply the
    method proposed by Long et al. (2017) to the WAT 2017 Japanese-Chinese
    and Japanese-English patent datasets. Evaluation on
    Japanese-to-Chinese, Chinese-to-Japanese, Japanese-to-English and
    English-to-Japanese patent sentence translation proved the
    effectiveness of phrases selected with branching entropy, where the NMT
    model of Long et al. (2017) achieves a substantial improvement over a
    baseline NMT model without the technique proposed by Long et al. (2017).},
  url       = {http://www.aclweb.org/anthology/W17-5709},
}

