@inproceedings{senellart-EtAl:2018:WNMT2018,
  author    = {Senellart, Jean and Zhang, Dakun and Wang, Bo and Klein, Guillaume and Ramatchandirin, Jean-Pierre and Crego, Josep and Rush, Alexander},
  title     = {{OpenNMT} System Description for {WNMT} 2018: 800 words/sec on a single-core {CPU}},
  booktitle = {Proceedings of the 2nd Workshop on Neural Machine Translation and Generation},
  month     = jul,
  year      = {2018},
  address   = {Melbourne, Australia},
  publisher = {Association for Computational Linguistics},
  pages     = {122--128},
  abstract  = {We present a system description of the OpenNMT Neural Machine Translation entry for the WNMT 2018 evaluation. In this work, we developed a heavily optimized NMT inference model targeting a high-performance CPU system. The final system uses a combination of four techniques, all of them lead to significant speed-ups in combination: (a) sequence distillation, (b) architecture modifications, (c) precomputation, particularly of vocabulary, and (d) CPU targeted quantization. This work achieves the fastest performance of the shared task, and led to the development of new features that have been integrated to OpenNMT and available to the community.},
  url       = {http://www.aclweb.org/anthology/W18-2715},
}

