@inproceedings{saha-EtAl:2016:COLING,
  author    = {Saha, Amrita and Khapra, Mitesh M. and Chandar, Sarath and Rajendran, Janarthanan and Cho, Kyunghyun},
  title     = {A Correlational Encoder Decoder Architecture for Pivot Based Sequence Generation},
  booktitle = {Proceedings of {COLING} 2016, the 26th International Conference on Computational Linguistics: Technical Papers},
  month     = dec,
  year      = {2016},
  address   = {Osaka, Japan},
  publisher = {The COLING 2016 Organizing Committee},
  pages     = {109--118},
  abstract  = {Interlingua based Machine Translation (MT) aims to encode multiple languages
	into a common linguistic representation and then decode sentences in multiple
	target languages from this representation. In this work we explore this idea in
	the context of neural encoder decoder architectures, albeit on a smaller scale
	and without MT as the end goal. Specifically, we consider the case of three
	languages or modalities X, Z and Y wherein we are interested in generating
	sequences in Y starting from information available in X. However, there is no
	parallel training data available between X and Y but, training data is
	available between X \& Z and Z \& Y (as is often the case in many real world
	applications). Z thus acts as a pivot/bridge. An obvious solution, which is
	perhaps less elegant but works very well in practice is to train a two stage
	model which first converts from X to Z and then from Z to Y. Instead we explore
	an interlingua inspired solution which jointly learns to do the following (i)
	encode X and Z to a common representation and (ii) decode Y from this common
	representation. We evaluate our model on two tasks: (i) bridge transliteration
	and (ii) bridge captioning. We report promising results in both these
	applications and believe that this is a right step towards truly interlingua
	inspired encoder decoder architectures.},
  url       = {https://aclanthology.org/C16-1011},
}

