@inproceedings{jang-myaeng-kim:2018:BlackboxNLP,
  author    = {Jang, Kyoung-Rok and Myaeng, Sung-Hyon and Kim, Sang-Bum},
  title     = {Interpretable Word Embedding Contextualization},
  booktitle = {Proceedings of the 2018 {EMNLP} Workshop {BlackboxNLP}: Analyzing and Interpreting Neural Networks for {NLP}},
  month     = nov,
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {341--343},
  abstract  = {In this paper, we propose a method of calibrating a word embedding, so that the semantic it conveys becomes more relevant to the context. Our method is novel because the output shows clearly which senses that were originally presented in a target word embedding become stronger or weaker. This is possible by utilizing the technique introduced in \cite{Murphy, Arora2016}, the technique of using sparse coding to recover senses that comprises a word embedding.},
  url       = {http://www.aclweb.org/anthology/W18-5442}
}

