@InProceedings{biemann:2016:CogALex-V,
  author    = {Biemann, Chris},
  title     = {Vectors or Graphs? On Differences of Representations for {Distributional Semantic Models}},
  booktitle = {Proceedings of the 5th Workshop on Cognitive Aspects of the Lexicon ({CogALex} - V)},
  month     = dec,
  year      = {2016},
  address   = {Osaka, Japan},
  publisher = {The COLING 2016 Organizing Committee},
  pages     = {1--7},
  abstract  = {Distributional Semantic Models (DSMs) have recently received increased
	attention, together with the rise of neural architectures for scalable training
	of dense vector embeddings. While some of the literature even includes terms
	like 'vectors' and 'dimensionality' in the definition of DSMs, there are some
	good reasons why we should consider alternative formulations of distributional
	models. As an instance, I present a scalable graph-based solution to
	distributional semantics. The model belongs to the family of 'count-based'
	DSMs, keeps its representation sparse and explicit, and thus fully
	interpretable. 
	I will highlight some important differences between sparse graph-based and
	dense vector approaches to DSMs: while dense vector-based models are
	computationally easier to handle and provide a nice uniform representation that
	can be compared and combined in many ways, they lack interpretability,
	provenance and robustness. On the other hand, graph-based sparse models have a
	more straightforward interpretation, handle sense distinctions more naturally
	and can straightforwardly be linked to knowledge bases, while lacking the
	ability to compare arbitrary lexical units and a compositionality operation. 
	Since both representations have their merits, I opt for exploring their
	combination in the outlook.},
  url       = {http://aclweb.org/anthology/W16-5301}
}

