@InProceedings{cer-EtAl:2018:Demos,
  author    = {Cer, Daniel and Yang, Yinfei and Kong, Sheng-yi and Hua, Nan and Limtiaco, Nicole and St. John, Rhomni and Constant, Noah and Guajardo-Cespedes, Mario and Yuan, Steve and Tar, Chris and Strope, Brian and Kurzweil, Ray},
  title     = {Universal Sentence Encoder for English},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {169--174},
  abstract  = {We present easy-to-use TensorFlow Hub sentence embedding models with good task transfer performance. Model variants allow for trade-offs between accuracy and compute resources. We report the relationship between model complexity, resources, and transfer performance. Comparisons are made with baselines without transfer learning and with baselines that incorporate word-level transfer. Transfer learning using sentence-level embeddings is shown to outperform models without transfer learning and often those that use only word-level transfer. We show good transfer task performance with minimal training data and obtain encouraging results on word embedding association tests (WEAT) of model bias.},
  url       = {http://www.aclweb.org/anthology/D18-2029}
}
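
The abstract describes sentence embedding models distributed via TensorFlow Hub. As a minimal usage sketch of that distribution channel (not code from the paper itself), the Python snippet below loads one model variant and embeds two sentences; the module handle and version number are assumptions, so check tfhub.dev for the currently published release.

    import tensorflow_hub as hub

    # Load a Universal Sentence Encoder variant from TensorFlow Hub.
    # The handle below is an assumed example, not taken from the paper;
    # see tfhub.dev for the versions currently available.
    embed = hub.load("https://tfhub.dev/google/universal-sentence-encoder/4")

    sentences = [
        "The quick brown fox jumps over the lazy dog.",
        "Sentence embeddings support transfer learning.",
    ]
    embeddings = embed(sentences)  # one 512-dimensional vector per sentence
    print(embeddings.shape)        # (2, 512)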

