@InProceedings{sharma-parekh-talukdar:2017:EMNLP2017,
  author    = {Sharma, Aditya  and  Parekh, Zarana  and  Talukdar, Partha},
  title     = {Speeding up {Reinforcement Learning}-based {Information Extraction} Training using Asynchronous Methods},
  booktitle = {Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {2658--2663},
  abstract  = {RLIE-DQN is a recently proposed Reinforcement Learning-based Information
	Extraction (IE) technique which is able to incorporate external evidence during
	the extraction process. RLIE-DQN trains a single agent sequentially, training
	on one instance at a time. This results in significant training slowdown which
	is undesirable. We leverage recent advances in parallel RL training using
	asynchronous methods and propose RLIE-A3C. RLIE-A3C trains multiple agents in
	parallel and is able to achieve upto 6x training speedup over RLIE-DQN, while
	suffering no loss in average accuracy.},
  doi       = {10.18653/v1/D17-1281},
  url       = {https://www.aclweb.org/anthology/D17-1281}
}

