@inproceedings{torabiasr-jones:2017:CoNLL,
  author    = {Torabi Asr, Fatemeh and Jones, Michael},
  title     = {An Artificial Language Evaluation of Distributional Semantic Models},
  booktitle = {Proceedings of the 21st Conference on Computational Natural Language Learning ({CoNLL} 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {134--142},
  abstract  = {Recent studies of distributional semantic models have set up a competition
               between word embeddings obtained from predictive neural networks and word
               vectors obtained from abstractive count-based models. This paper is an attempt
               to reveal the underlying contribution of additional training data and
               post-processing steps on each type of model in word similarity and relatedness
               inference tasks. We do so by designing an artificial language framework,
               training a predictive and a count-based model on data sampled from this
               grammar, and evaluating the resulting word vectors in paradigmatic and
               syntagmatic tasks defined with respect to the grammar.},
  url       = {http://aclweb.org/anthology/K17-1015},
}

