@Proceedings{Demos:2018,
  editor    = {Eduardo Blanco  and  Wei Lu},
  title     = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  url       = {http://www.aclweb.org/anthology/D18-2}
}

@InProceedings{tanveer-ture:2018:Demos,
  author    = {Tanveer, Md Iftekhar  and  Ture, Ferhan},
  title     = {SyntaViz: Visualizing Voice Queries through a Syntax-Driven Hierarchical Ontology},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {1--6},
  abstract  = {This paper describes SyntaViz, a visualization interface specifically designed for analyzing natural-language queries that were created by users of a voice-enabled product. SyntaViz provides a platform for browsing the ontology of user queries from a syntax-driven perspective, providing quick access to high-impact failure points of the existing intent understanding system and evidence for data-driven decisions in the development cycle. A case study on Xfinity X1 (a voice-enabled entertainment platform from Comcast) reveals that SyntaViz helps developers identify multiple action items in a short amount of time without any special training. SyntaViz has been open-sourced for the benefit of the community.},
  url       = {http://www.aclweb.org/anthology/D18-2001}
}

@InProceedings{yin-neubig:2018:Demos,
  author    = {Yin, Pengcheng  and  Neubig, Graham},
  title     = {TRANX: A Transition-based Neural Abstract Syntax Parser for Semantic Parsing and Code Generation},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {7--12},
  abstract  = {We present TRANX, a transition-based neural semantic parser that maps natural language (NL) utterances into formal meaning representations (MRs). TRANX uses a transition system based on the abstract syntax description language for the target MR, which gives it two major advantages: (1) it is highly accurate, using information from the syntax of the target MR to constrain the output space and model the information flow, and (2) it is highly generalizable, and can easily be applied to new types of MR by just writing a new abstract syntax description corresponding to the allowable structures in the MR. Experiments on four different semantic parsing and code generation tasks show that our system is generalizable, extensible, and effective, registering strong results compared to existing neural semantic parsers.},
  url       = {http://www.aclweb.org/anthology/D18-2002}
}

@InProceedings{dou-EtAl:2018:Demos,
  author    = {Dou, Longxu  and  Qin, Guanghui  and  Wang, Jinpeng  and  Yao, Jin-Ge  and  Lin, Chin-Yew},
  title     = {Data2Text Studio: Automated Text Generation from Structured Data},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {13--18},
  abstract  = {Data2Text Studio is a platform for automated text generation from structured data. It is equipped with a Semi-HMMs model to extract high-quality templates and corresponding trigger conditions from parallel data automatically, which improves the interactivity and interpretability of the generated text. In addition, several easy-to-use tools are provided for developers to edit templates of pre-trained models, and APIs are released for developers to call the pre-trained model to generate texts in third-party applications. We conduct experiments on RotoWire datasets for template extraction and text generation. The results show that our model achieves improvements on both tasks.},
  url       = {http://www.aclweb.org/anthology/D18-2003}
}

@InProceedings{mamou-EtAl:2018:Demos,
  author    = {Mamou, Jonathan  and  Pereg, Oren  and  Wasserblat, Moshe  and  Eirew, Alon  and  Green, Yael  and  Guskin, Shira  and  Izsak, Peter  and  Korat, Daniel},
  title     = {Term Set Expansion based NLP Architect by Intel AI Lab},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {19--24},
  abstract  = {We present SetExpander, a corpus-based system for expanding a seed set of terms into a more complete set of terms that belong to the same semantic class. SetExpander implements an iterative end-to-end workflow. It enables users to easily select a seed set of terms, expand it, view the expanded set, validate it, re-expand the validated set and store it, thus simplifying},
  url       = {http://www.aclweb.org/anthology/D18-2004}
}

@InProceedings{zen-ehsani-solak:2018:Demos,
  author    = {Özenç, Berke  and  Ehsani, Razieh  and  Solak, Ercan},
  title     = {MorAz: an Open-source Morphological Analyzer for Azerbaijani Turkish},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {25--29},
  abstract  = {MorAz is an open-source morphological analyzer for Azerbaijani Turkish. The analyzer is available through both as a website for interactive exploration and as a RESTful web service for integration into a natural language processing pipeline. MorAz implements the morphology of Azerbaijani Turkish in two-level using Helsinki finite-state transducer and wraps the analyzer with python scripts in a Django instance.},
  url       = {http://www.aclweb.org/anthology/D18-2005}
}

@InProceedings{loginova-neumann:2018:Demos,
  author    = {Loginova, Ekaterina  and  Neumann, Günter},
  title     = {An Interactive Web-Interface for Visualizing the Inner Workings of the Question Answering LSTM},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {30--35},
  abstract  = {We present a visualisation tool which aims to illuminate the inner workings of an LSTM model for question answering. It plots heatmaps of neurons' firings and allows a user to check the dependency between neurons and manual features. The system possesses an interactive web-interface and can be adapted to other models and domains. },
  url       = {http://www.aclweb.org/anthology/D18-2006}
}

@InProceedings{liu-EtAl:2018:Demos,
  author    = {Liu, Shusen  and  Li, Tao  and  Li, Zhimin  and  Srikumar, Vivek  and  Pascucci, Valerio  and  Bremer, Peer-Timo},
  title     = {Visual Interrogation of Attention-Based Models for Natural Language Inference and Machine Comprehension},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {36--41},
  abstract  = {Neural networks models have gained unprecedented popularity in natural language processing due to their state-of-the-art performance and the flexible end-to-end training scheme. Despite their advantages, the lack of interpretability hinders the deployment and refinement of the models. In this work, we present a flexible visualization library for creating customized visual analytic environments, in which the user can investigate and interrogate the relationships among the input, the model internals (i.e., attention), and the output predictions, which in turn shed light on the model decision-making process.},
  url       = {http://www.aclweb.org/anthology/D18-2007}
}

@InProceedings{adel-EtAl:2018:Demos,
  author    = {Adel, Heike  and  Bostan, Laura Ana Maria  and  Papay, Sean  and  Padó, Sebastian  and  Klinger, Roman},
  title     = {DERE: A Task and Domain-Independent Slot Filling Framework for Declarative Relation Extraction},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {42--47},
  abstract  = {Most machine learning systems for natural language processing are tailored to specific tasks. As a result, comparability of models across tasks is missing and their applicability to new tasks is limited. This affects end users without machine learning experience as well as model developers. To address these limitations, we present DERE, a novel framework for declarative specification and compilation of template-based information extraction. It uses a generic specification language for the task and for data annotations in terms of spans and frames. This formalism enables the representation of a large variety of natural language processing challenges. The backend can be instantiated by different models, following different paradigms. The clear separation of frame specification and model backend will ease the implementation of new models and the evaluation of different models across different tasks. Furthermore, it simplifies transfer learning, joint learning across tasks and/or domains as well as the assessment of model generalizability. DERE is available as open-source software.},
  url       = {http://www.aclweb.org/anthology/D18-2008}
}

@InProceedings{yimam-biemann:2018:Demos,
  author    = {Yimam, Seid Muhie  and  Biemann, Chris},
  title     = {Demonstrating Par4Sem - A Semantic Writing Aid with Adaptive Paraphrasing},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {48--53},
  abstract  = {In this paper, we present Par4Sem, a semantic writing aid tool based on adaptive paraphrasing. Unlike many annotation tools that are primarily used to collect training examples, Par4Sem is integrated into a real word application, in this case a writing aid tool, in order to collect training examples from usage data. Par4Sem is a tool, which supports an adaptive, iterative, and interactive process where the underlying machine learning models are updated for each iteration using new training examples from usage data. After motivating the use of ever-learning tools in NLP applications, we evaluate Par4Sem by adopting it to a text simplification task through mere usage.},
  url       = {http://www.aclweb.org/anthology/D18-2009}
}

@InProceedings{tolmachev-kawahara-kurohashi:2018:Demos,
  author    = {Tolmachev, Arseny  and  Kawahara, Daisuke  and  Kurohashi, Sadao},
  title     = {Juman++: A Morphological Analysis Toolkit for Scriptio Continua},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {54--59},
  abstract  = {We present a three-part toolkit for developing morphological analyzers for languages without natural word boundaries. The first part is a C++11/14 lattice-based morphological analysis library that uses a combination of linear and recurrent neural net language models for analysis. The other parts are a tool for exposing problems in the trained model and a partial annotation tool. Our morphological analyzer of Japanese achieves new SOTA on Jumandic-based corpora while being 250 times faster than the previous one. We also perform a small experiment and quantitive analysis and experience of using development tools. All components of the toolkit is open source and available under a permissive Apache 2 License.},
  url       = {http://www.aclweb.org/anthology/D18-2010}
}

@InProceedings{ajjour-EtAl:2018:Demos,
  author    = {Ajjour, Yamen  and  Wachsmuth, Henning  and  Kiesel, Dora  and  Riehmann, Patrick  and  Fan, Fan  and  Castiglia, Giuliano  and  Adejoh, Rosemary  and  Fröhlich, Bernd  and  Stein, Benno},
  title     = {Visualization of the Topic Space of Argument Search Results in args.me},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {60--65},
  abstract  = {In times of fake news and alternative facts, pro and con arguments on controversial topics are of increasing importance. Recently, we presented args.me as the first search engine for arguments on the web. In its initial version, args.me ranked arguments solely by their relevance to a topic queried for, making it hard to learn about the diverse topical aspects covered by the search results. To tackle this shortcoming, we integrated a visualization interface for result exploration in args.me that provides an instant overview of the main aspects in a barycentric coordinate system. This topic space is generated ad-hoc from controversial issues on Wikipedia and argument-specific LDA models. In two case studies, we demonstrate how individual arguments can be found easily through interactions with the visualization, such as highlighting and filtering.},
  url       = {http://www.aclweb.org/anthology/D18-2011}
}

@InProceedings{kudo-richardson:2018:Demos,
  author    = {Kudo, Taku  and  Richardson, John},
  title     = {SentencePiece: A simple and language independent subword tokenizer and detokenizer for Neural Text Processing},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {66--71},
  abstract  = {This paper describes SentencePiece, a language-independent subword},
  url       = {http://www.aclweb.org/anthology/D18-2012}
}

@InProceedings{ning-EtAl:2018:Demos,
  author    = {Ning, Qiang  and  Zhou, Ben  and  Feng, Zhili  and  Peng, Haoruo  and  Roth, Dan},
  title     = {CogCompTime: A Tool for Understanding Time in Natural Language},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {72--77},
  abstract  = {Automatic extraction of temporal information is important for natural language understanding. It involves two basic tasks: (1) Understanding time expressions that are mentioned explicitly in text (e.g., February 27, 1998 or tomorrow), and (2) Understanding temporal information that is conveyed implicitly via relations. This paper introduces CogCompTime, a system that has these two important functionalities. It incorporates the most recent progress, achieves state-of-the-art performance, and is publicly available at http://cogcomp.org/page/publication\_view/844.},
  url       = {http://www.aclweb.org/anthology/D18-2013}
}

@InProceedings{wiedemann-yimam-biemann:2018:Demos,
  author    = {Wiedemann, Gregor  and  Yimam, Seid Muhie  and  Biemann, Chris},
  title     = {A Multilingual Information Extraction Pipeline for Investigative Journalism},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {78--83},
  abstract  = {We introduce an advanced information extraction pipeline to automatically process very large collections of unstructured textual data for the purpose of investigative journalism. The pipeline serves as a new input processor for the upcoming major release of our New/s/leak 2.0 software, which we develop in cooperation with a large German news organization. The use case is that journalists receive a large collection of files up to several Gigabytes containing unknown contents. Collections may originate either from official disclosures of documents, e.g. Freedom of Information Act requests, or unofficial data leaks. },
  url       = {http://www.aclweb.org/anthology/D18-2014}
}

@InProceedings{peter-beck-ney:2018:Demos,
  author    = {Peter, Jan-Thorsten  and  Beck, Eugen  and  Ney, Hermann},
  title     = {Sisyphus, a Workflow Manager Designed for Machine Translation and Automatic Speech Recognition},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {84--89},
  abstract  = {Training and testing many possible parameters or model architectures of state-of-the-art machine translation or automatic speech recognition system is a cumbersome task. They usually require a long pipeline of commands reaching from pre-processing the training data to post-processing and evaluating the output.},
  url       = {http://www.aclweb.org/anthology/D18-2015}
}

@InProceedings{lakomkin-EtAl:2018:Demos,
  author    = {Lakomkin, Egor  and  Magg, Sven  and  Weber, Cornelius  and  Wermter, Stefan},
  title     = {KT-Speech-Crawler: Automatic Dataset Construction for Speech Recognition from YouTube Videos},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {90--95},
  abstract  = {We describe KT-Speech-Crawler: an approach for automatic dataset construction for speech recognition by crawling YouTube videos. We outline several filtering and post-processing steps, which extract samples that can be used for training end-to-end neural speech recognition systems. In our experiments, we demonstrate that a single-core version of the crawler can obtain around 150 hours of transcribed speech within a day, containing an estimated 3.5% word error rate in the transcriptions. Automatically collected samples contain reading and spontaneous speech recorded in various conditions including background noise and music, distant microphone recordings, and a variety of accents and reverberation. When training a deep neural network on speech recognition, we observed around 40% word error rate reduction on the Wall Street Journal dataset by integrating 200 hours of the collected samples into the training set.},
  url       = {http://www.aclweb.org/anthology/D18-2016}
}

@InProceedings{zhang-EtAl:2018:Demos,
  author    = {Zhang, Ni  and  Zhang, Tongtao  and  Bhattacharya, Indrani  and  Ji, Heng  and  Radke, Rich},
  title     = {Visualizing Group Dynamics based on Multiparty Meeting Understanding},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {96--101},
  abstract  = {Group discussions are usually aimed at sharing opinions, reaching consensus and making good decisions based on group knowledge. During a discussion, participants might adjust their own opinions as well as tune their attitudes towards others' opinions, based on the unfolding interactions. In this paper, we demonstrate a framework to visualize such dynamics; at each instant of a conversation, the participants' opinions and potential influence on their counterparts is easily visualized. We use multi-party meeting opinion mining based on bipartite graphs to extract opinions and calculate mutual influential factors, using the Lunar Survival Task as a study case.},
  url       = {http://www.aclweb.org/anthology/D18-2017}
}

@InProceedings{boratko-EtAl:2018:Demos,
  author    = {Boratko, Michael  and  Padigela, Harshit  and  Mikkilineni, Divyendra  and  Yuvraj, Pritish  and  Das, Rajarshi  and  McCallum, Andrew  and  Chang, Maria  and  Fokoue, Achille  and  Kapanipathi, Pavan  and  Mattei, Nicholas  and  Musa, Ryan  and  Talamadupula, Kartik  and  Witbrock, Michael},
  title     = {An Interface for Annotating Science Questions},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {102--107},
  abstract  = {Recent work introduces the AI2 Reasoning Challenge (ARC) and the associated ARC dataset that partitions open domain, complex science questions into an Easy Set and a Challenge Set. That work includes an analysis of 100 questions with respect to the types of knowledge and reasoning required to answer them. However, it does not include clear definitions of these types, nor does it offer information about the quality of the labels or the annotation process used. In this paper, we introduce a novel interface for human annotation of science question-answer pairs with their respective knowledge and reasoning types, in order that the classification of new questions may be improved. We build on the classification schema proposed by prior work on the ARC dataset, and evaluate the effectiveness of our interface with a preliminary study involving 10 participants.},
  url       = {http://www.aclweb.org/anthology/D18-2018}
}

@InProceedings{nghiem-ananiadou:2018:Demos,
  author    = {Nghiem, Minh-Quoc  and  Ananiadou, Sophia},
  title     = {APLenty: annotation tool for creating high-quality datasets using active and proactive learning},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {108--113},
  abstract  = {In this paper, we present APLenty, an annotation tool for creating high-quality sequence labeling datasets using active and proactive learning.},
  url       = {http://www.aclweb.org/anthology/D18-2019}
}

@InProceedings{sorokin-gurevych:2018:Demos,
  author    = {Sorokin, Daniil  and  Gurevych, Iryna},
  title     = {Interactive Instance-based Evaluation of Knowledge Base Question Answering},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {114--119},
  abstract  = {Most approaches to Knowledge Base Question Answering are based on semantic parsing. In this paper, we present a tool that aids in debugging of question answering systems that construct a structured semantic representation for the input question. Previous work has largely focused on building question answering interfaces or evaluation frameworks that unify multiple data sets. The primary objective of our system is to enable interactive debugging of model predictions on individual instances (questions) and to simplify manual error analysis. Our interactive interface helps researchers to understand the shortcomings of a particular model, qualitatively analyze the complete pipeline and compare different models. A set of sit-by sessions was used to validate our interface design.},
  url       = {http://www.aclweb.org/anthology/D18-2020}
}

@InProceedings{patel-EtAl:2018:Demos,
  author    = {Patel, Ajay  and  Sands, Alexander  and  Callison-Burch, Chris  and  Apidianaki, Marianna},
  title     = {Magnitude: A Fast, Efficient Universal Vector Embedding Utility Package},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {120--126},
  abstract  = {Vector space embedding models like word2vec, GloVe, and fastText are extremely popular representations in natural language processing (NLP) applications. We present Magnitude, a fast, lightweight tool for utilizing and processing embeddings. Magnitude is an open source Python package with a compact vector storage file format that allows for efficient manipulation of huge numbers of embeddings. Magnitude performs common operations up to 60 to 6,000 times faster than Gensim. Magnitude introduces several novel features for improved robustness like out-of-vocabulary lookups.},
  url       = {http://www.aclweb.org/anthology/D18-2021}
}

@InProceedings{boullosa-EtAl:2018:Demos,
  author    = {Boullosa, Beto  and  Eckart de Castilho, Richard  and  Kumar, Naveen  and  Klie, Jan-Christoph  and  Gurevych, Iryna},
  title     = {Integrating Knowledge-Supported Search into the INCEpTION Annotation Platform},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {127--132},
  abstract  = {Annotating entity mentions and linking them to a knowledge resource are essential tasks in many domains. It disambiguates mentions, introduces cross-document coreferences, and the resources contribute extra information, e.g. taxonomic relations. Such tasks benefit from text annotation tools that integrate a search which covers the text, the annotations, as well as the knowledge resource. However, to the best of our knowledge, no current tools integrate knowledge-supported search as well as entity linking support. We address this gap by introducing knowledge-supported search functionality into the INCEpTION text annotation platform. In our approach, cross-document references are created by linking entity mentions to a knowledge base in the form of a structured hierarchical vocabulary. The resulting annotations are then indexed to enable fast and yet complex queries taking into account the text, the annotations, and the vocabulary structure.},
  url       = {http://www.aclweb.org/anthology/D18-2022}
}

@InProceedings{wang-utiyama-sumita:2018:Demos,
  author    = {Wang, Xiaolin  and  Utiyama, Masao  and  Sumita, Eiichiro},
  title     = {CytonMT: an Efficient Neural Machine Translation Open-source Toolkit Implemented in C++},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {133--138},
  abstract  = {This paper presents an open-source neural machine translation toolkit named CytonMT\footnote{https://github.com/arthurxlw/cytonMt}. The toolkit is built from scratch only using C++ and NVIDIA's GPU-accelerated libraries. The toolkit features training efficiency, code simplicity and translation quality. Benchmarks show that cytonMT accelerates the training speed by 64.5\% to 110.8\% on neural networks of various sizes, and achieves competitive translation quality.},
  url       = {http://www.aclweb.org/anthology/D18-2023}
}

@InProceedings{han-EtAl:2018:Demos,
  author    = {Han, Xu  and  Cao, Shulin  and  Lv, Xin  and  Lin, Yankai  and  Liu, Zhiyuan  and  Sun, Maosong  and  Li, Juanzi},
  title     = {OpenKE: An Open Toolkit for Knowledge Embedding},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {139--144},
  abstract  = {We release an open toolkit for knowledge embedding (OpenKE), which provides a unified framework and various fundamental models to embed knowledge graphs into a continuous low-dimensional space. OpenKE prioritizes operational efficiency to support quick model validation and large-scale knowledge representation learning. Meanwhile, OpenKE maintains sufficient modularity and extensibility to easily incorporate new models into the framework. Besides the toolkit, the embeddings of some existing large-scale knowledge graphs pre-trained by OpenKE are also available, which can be directly applied for many applications including information retrieval, personalized recommendation and question answering. The toolkit, documentation, and pre-trained embeddings are all released on http://openke.thunlp.org/.},
  url       = {http://www.aclweb.org/anthology/D18-2024}
}

@InProceedings{labutov-srivastava-mitchell:2018:Demos,
  author    = {Labutov, Igor  and  Srivastava, Shashank  and  Mitchell, Tom},
  title     = {LIA: A Natural Language Programmable Personal Assistant},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {145--150},
  abstract  = {We present LIA, an intelligent personal assistant that can be programmed using natural language. Our system demonstrates multiple competencies towards learning from human-like interactions. These include the ability to be taught reusable conditional procedures, the ability to be taught new knowledge about the world (concepts in an ontology) and the ability to be taught how to ground that knowledge in a set of sensors and effectors. Building such a system highlights design questions regarding the overall architecture that such an agent should have, as well as questions about parsing and grounding language in situational contexts. We outline key properties of this architecture, and demonstrate a prototype that embodies them in the form of a personal assistant on an Android device.},
  url       = {http://www.aclweb.org/anthology/D18-2025}
}

@InProceedings{raux-EtAl:2018:Demos,
  author    = {Raux, Antoine  and  Ma, Yi  and  Yang, Paul  and  Wong, Felicia},
  title     = {PizzaPal: Conversational Pizza Ordering using a High-Density Conversational AI Platform},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = {November},
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {151--156},
  abstract  = {This paper describes PizzaPal, a voice-only agent for ordering pizza, as well as the Conversational AI architecture built at b4.ai. Based on the principles of high-density conversational AI, it supports natural and flexible interactions through neural conversational language understanding, robust dialog state tracking, and hierarchical task decomposition.},
  url       = {http://www.aclweb.org/anthology/D18-2026}
}

@InProceedings{raghuvanshi-carroll-raghunathan:2018:Demos,
  author    = {Raghuvanshi, Arushi  and  Carroll, Lucien  and  Raghunathan, Karthik},
  title     = {Developing Production-Level Conversational Interfaces with Shallow Semantic Parsing},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = nov,
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {157--162},
  abstract  = {We demonstrate an end-to-end approach for building conversational interfaces from prototype to production that has proven to work well for a number of applications across diverse verticals. Our architecture improves on the standard domain-intent-entity classification hierarchy and dialogue management architecture by leveraging shallow semantic parsing. We observe that NLU systems for industry applications often require more structured representations of entity relations than provided by the standard hierarchy, yet without requiring full semantic parses which are often inaccurate on real-world conversational data. We distinguish two kinds of semantic properties that can be provided through shallow semantic parsing: entity groups and entity roles. We also provide live demos of conversational apps built for two different use cases: food ordering and meeting control.},
  url       = {http://www.aclweb.org/anthology/D18-2027}
}

@InProceedings{vadapalli-EtAl:2018:Demos,
  author    = {Vadapalli, Raghuram  and  Syed, Bakhtiyar  and  Prabhu, Nishant  and  Srinivasan, Balaji Vasan  and  Varma, Vasudeva},
  title     = {When Science Journalism Meets Artificial Intelligence: An Interactive Demonstration},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = nov,
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {163--168},
  abstract  = {We present an online interactive tool that generates titles of blog titles and thus take the first step toward automating science journalism. Science journalism aims to transform jargon-laden scientific articles into a form that},
  url       = {http://www.aclweb.org/anthology/D18-2028}
}

@InProceedings{cer-EtAl:2018:Demos,
  author    = {Cer, Daniel  and  Yang, Yinfei  and  Kong, Sheng-yi  and  Hua, Nan  and  Limtiaco, Nicole  and  St. John, Rhomni  and  Constant, Noah  and  Guajardo-Cespedes, Mario  and  Yuan, Steve  and  Tar, Chris  and  Strope, Brian  and  Kurzweil, Ray},
  title     = {Universal Sentence Encoder for {English}},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = nov,
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {169--174},
  abstract  = {We present easy-to-use TensorFlow Hub sentence embedding models having good task transfer performance. Model variants allow for trade-offs between accuracy and compute resources. We report the relationship between model complexity, resources, and transfer performance. Comparisons are made with baselines without transfer learning and to baselines that incorporate word-level transfer. Transfer learning using sentence-level embeddings is shown to outperform models without transfer learning and often those that use only word-level transfer. We show good transfer task performance with minimal training data and obtain encouraging results on word embedding association tests (WEAT) of model bias.},
  url       = {http://www.aclweb.org/anthology/D18-2029}
}

