@inproceedings{choi-EtAl:2018:W18-65,
  author    = {Choi, Hyungtak and K.M., Siddarth and Yang, Haehun and Jeon, Heesik and Hwang, Inchul and Kim, Jihie},
  title     = {Self-Learning Architecture for {Natural Language Generation}},
  booktitle = {Proceedings of the 11th International Conference on Natural Language Generation},
  month     = sep,
  year      = {2018},
  address   = {Tilburg University, The Netherlands},
  publisher = {Association for Computational Linguistics},
  pages     = {165--170},
  abstract  = {In this paper, we propose a self-learning architecture for generating natural language templates for conversational assistants. Generating templates to cover all the combinations of slots in an intent is time consuming and labor-intensive. We examine three different models based on our proposed architecture - Rule-based model, Sequence-to-Sequence (Seq2Seq) model and Semantically Conditioned LSTM (SC-LSTM) model for the IoT domain - to reduce the human labor required for template generation. We demonstrate the feasibility of template generation for the IoT domain using our self-learning architecture. In both automatic and human evaluation, the self-learning architecture performs better than previous works trained with a fully human-labeled dataset. This is promising for commercial conversational assistant solutions.},
  url       = {http://www.aclweb.org/anthology/W18-6520},
}

