@InProceedings{hasegawa-EtAl:2017:Short,
  author    = {Hasegawa, Shun  and  Kikuchi, Yuta  and  Takamura, Hiroya  and  Okumura, Manabu},
  title     = {Japanese Sentence Compression with a Large Training Dataset},
  booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
  month     = {July},
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {281--286},
  abstract  = {In English, high-quality deletion-based sentence compression models have
	been trained on automatically created large training datasets. We take a
	similar approach to Japanese sentence compression. To create a large
	Japanese training dataset, a method for creating an English training
	dataset is modified based on the characteristics of the Japanese
	language. The created dataset is then used to train Japanese sentence
	compression models based on recurrent neural networks.},
  url       = {http://aclweb.org/anthology/P17-2044}
}