@inproceedings{ushiku-EtAl:2017:I17-1,
  author    = {Ushiku, Atsushi and Hashimoto, Hayato and Hashimoto, Atsushi and Mori, Shinsuke},
  title     = {Procedural Text Generation from an Execution Video},
  booktitle = {Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers)},
  month     = nov,
  year      = {2017},
  address   = {Taipei, Taiwan},
  publisher = {Asian Federation of Natural Language Processing},
  pages     = {326--335},
  abstract  = {In recent years, there has been a surge of interest in automatically
    describing images or videos in a natural language. These descriptions are
    useful for image/video search, etc. In this paper, we focus on procedure
    execution videos, in which a human makes or repairs something and propose a
    method for generating procedural texts from them. Since video/text pairs
    available are limited in size, the direct application of end-to-end deep
    learning is not feasible. Thus we propose to train Faster R-CNN network for
    object recognition and LSTM for text generation and combine them at run
    time. We took pairs of recipe and cooking video, generated a recipe from a
    video, and compared it with the original recipe. The experimental results
    showed that our method can produce a recipe as accurate as the
    state-of-the-art scene descriptions.},
  url       = {http://www.aclweb.org/anthology/I17-1033},
}

