@inproceedings{kafle-yousefhussien-kanan:2017:INLG2017,
  author    = {Kafle, Kushal and Yousefhussien, Mohammed and Kanan, Christopher},
  title     = {Data Augmentation for {Visual Question Answering}},
  booktitle = {Proceedings of the 10th International Conference on Natural Language Generation},
  month     = sep,
  year      = {2017},
  address   = {Santiago de Compostela, Spain},
  publisher = {Association for Computational Linguistics},
  pages     = {198--202},
  abstract  = {Data augmentation is widely used to train deep neural networks for image
               classification tasks. Simply flipping images can help learning tremendously by
               increasing the number of training images by a factor of two. However, little
               work has been done studying data augmentation in natural language processing.
               Here, we describe two methods for data augmentation for Visual Question
               Answering (VQA). The first uses existing semantic annotations to generate new
               questions. The second method is a generative approach using recurrent neural
               networks. Experiments show that the proposed data augmentation improves
               performance of both baseline and state-of-the-art VQA algorithms.},
  url       = {https://aclanthology.org/W17-3529},
}

