@InProceedings{park-bak-oh:2017:EMNLP2017,
  author    = {Park, Sungjoon  and  Bak, JinYeong  and  Oh, Alice},
  title     = {Rotated Word Vector Representations and their Interpretability},
  booktitle = {Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {401--411},
  abstract  = {Vector representation of words improves performance in various NLP tasks, but
	the high dimensional word vectors are very difficult to interpret. We apply
	several rotation algorithms to the vector representation of words to improve
	the interpretability. Unlike previous approaches that induce sparsity, the
	rotated vectors are interpretable while preserving the expressive performance
	of the original vectors. Furthermore, any prebuilt word vector representation
	can be rotated for improved interpretability. We apply rotation to skipgrams
	and glove and compare the expressive power and interpretability with the
	original vectors and the sparse overcomplete vectors. The results show that the
	rotated vectors outperform the original and the sparse overcomplete vectors for
	interpretability and expressiveness tasks.},
  doi       = {10.18653/v1/D17-1041},
  url       = {https://www.aclweb.org/anthology/D17-1041}
}

