@InProceedings{zeman-EtAl:2017:K17-3,
  author    = {Zeman, Daniel  and  Popel, Martin  and  Straka, Milan  and  Hajic, Jan  and  Nivre, Joakim  and  Ginter, Filip  and  Luotolahti, Juhani  and  Pyysalo, Sampo  and  Petrov, Slav  and  Potthast, Martin  and  Tyers, Francis  and  Badmaeva, Elena  and  Gokirmak, Memduh  and  Nedoluzhko, Anna  and  Cinkova, Silvie  and  Hajic, jr., Jan  and  Hlavacova, Jaroslava  and  Kettnerov{\'a}, V{\'a}clava  and  Uresova, Zdenka  and  Kanerva, Jenna  and  Ojala, Stina  and  Missil{\"a}, Anna  and  Manning, Christopher D.  and  Schuster, Sebastian  and  Reddy, Siva  and  Taji, Dima  and  Habash, Nizar  and  Leung, Herman  and  de Marneffe, Marie-Catherine  and  Sanguinetti, Manuela  and  Simi, Maria  and  Kanayama, Hiroshi  and  dePaiva, Valeria  and  Droganova, Kira  and  Mart{\'i}nez Alonso, H{\'e}ctor  and  {\c{C}}{\"o}ltekin, {\c{C}}a{\u{g}}r{\i}  and  Sulubacak, Umut  and  Uszkoreit, Hans  and  Macketanz, Vivien  and  Burchardt, Aljoscha  and  Harris, Kim  and  Marheinecke, Katrin  and  Rehm, Georg  and  Kayadelen, Tolga  and  Attia, Mohammed  and  Elkahky, Ali  and  Yu, Zhuoran  and  Pitler, Emily  and  Lertpradit, Saran  and  Mandl, Michael  and  Kirchner, Jesse  and  Alcalde, Hector Fernandez  and  Strnadov{\'a}, Jana  and  Banerjee, Esha  and  Manurung, Ruli  and  Stella, Antonio  and  Shimada, Atsuko  and  Kwak, Sookyoung  and  Mendonca, Gustavo  and  Lando, Tatiana  and  Nitisaroj, Rattima  and  Li, Josie},
  title     = {{CoNLL} 2017 Shared Task: Multilingual Parsing from Raw Text to {Universal Dependencies}},
  booktitle = {Proceedings of the {CoNLL} 2017 Shared Task: Multilingual Parsing from Raw Text to {Universal Dependencies}},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {1--19},
  abstract  = {The Conference on Computational Natural Language Learning (CoNLL) features a
	shared task, in which participants train and test their learning systems on the
	same data sets. In 2017, the task was devoted to learning dependency parsers
	for a large number of languages, in a real-world setting without any
	gold-standard annotation on input. All test sets followed a unified annotation
	scheme, namely that of Universal Dependencies. In this paper, we define the
	task and evaluation methodology, describe how the data sets were prepared,
	report and analyze the main results, and provide a brief categorization of the
	different approaches of the participating systems.},
  doi       = {10.18653/v1/K17-3001},
  url       = {https://www.aclweb.org/anthology/K17-3001},
}

