@inproceedings{kaji-kobayashi:2017:EMNLP2017,
  author    = {Kaji, Nobuhiro and Kobayashi, Hayato},
  title     = {Incremental {Skip-gram} Model with Negative Sampling},
  booktitle = {Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {363--371},
  doi       = {10.18653/v1/D17-1037},
  abstract  = {This paper explores an incremental training strategy for the skip-gram model
	with negative sampling (SGNS) from both empirical and theoretical perspectives.
	Existing methods of neural word embeddings, including SGNS, are multi-pass
	algorithms and thus cannot perform incremental model update. To address this
	problem, we present a simple incremental extension of SGNS and provide a
	thorough theoretical analysis to demonstrate its validity. Empirical
	experiments demonstrated the correctness of the theoretical analysis as well as
	the practical usefulness of the incremental algorithm.},
  url       = {https://www.aclweb.org/anthology/D17-1037},
}

