@inproceedings{tang-EtAl:2018:W18-30,
  author    = {Tang, Shuai  and  Jin, Hailin  and  Fang, Chen  and  Wang, Zhaowen  and  de Sa, Virginia},
  title     = {Speeding up Context-based Sentence Representation Learning with Non-autoregressive Convolutional Decoding},
  booktitle = {Proceedings of The Third Workshop on Representation Learning for NLP},
  month     = jul,
  year      = {2018},
  address   = {Melbourne, Australia},
  publisher = {Association for Computational Linguistics},
  pages     = {69--78},
  abstract  = {We propose an asymmetric encoder-decoder structure, which keeps an RNN as the encoder and has a CNN as the decoder, and the model only explores the subsequent context information as the supervision. The asymmetry in both model architecture and training pair reduces a large amount of the training time. The contribution of our work is summarized as 1. We design experiments to show that an autoregressive decoder or an RNN decoder is not necessary for the encoder-decoder type of models in terms of learning sentence representations, and based on our results, we present 2 findings. 2. The two interesting findings lead to our final model design, which has an RNN encoder and a CNN decoder, and it learns to encode the current sentence and decode the subsequent contiguous words all at once. 3. With a suite of techniques, our model performs good on downstream tasks and can be trained efficiently on a large unlabelled corpus.},
  doi       = {10.18653/v1/W18-3009},
  url       = {http://www.aclweb.org/anthology/W18-3009}
}

