@Proceedings{starSEM:2017,
  editor    = {Ide, Nancy  and  Herbelot, Aur{\'e}lie  and  M{\`a}rquez, Llu{\'\i}s},
  title     = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  url       = {http://www.aclweb.org/anthology/S17-1}
}

@InProceedings{finley-farmer-pakhomov:2017:starSEM,
  author    = {Finley, Gregory  and  Farmer, Stephanie  and  Pakhomov, Serguei},
  title     = {What Analogies Reveal about Word Vectors and their Compositionality},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {1--11},
  abstract  = {Analogy completion via vector arithmetic has become a common means of
	demonstrating the compositionality of word embeddings. Previous work have shown
	that this strategy works more reliably for certain types of analogical word
	relationships than for others, but these studies have not offered a convincing
	account for why this is the case. We arrive at such an account through an
	experiment that targets a wide variety of analogy questions and defines a
	baseline condition to more accurately measure the efficacy of our system. We
	find that the most reliably solvable analogy categories involve either 1) the
	application of a morpheme with clear syntactic effects, 2) male--female
	alternations, or 3) named entities. These broader types do not pattern cleanly
	along a syntactic--semantic divide. We suggest instead that their commonality
	is distributional, in that the difference between the distributions of two
	words in any given pair encompasses a relatively small number of word types.
	Our study offers a needed explanation for why analogy tests succeed and fail
	where they do and provides nuanced insight into the relationship between word
	distributions and the theoretical linguistic domains of syntax and semantics.},
  url       = {http://www.aclweb.org/anthology/S17-1001}
}

@InProceedings{rajana-EtAl:2017:starSEM,
  author    = {Rajana, Sneha  and  Callison-Burch, Chris  and  Apidianaki, Marianna  and  Shwartz, Vered},
  title     = {Learning Antonyms with Paraphrases and a Morphology-Aware Neural Network},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {12--21},
  abstract  = {Recognizing and distinguishing antonyms from other types of semantic relations
	is an essential part of language understanding systems. In this paper, we
	present a novel method for deriving antonym pairs using paraphrase pairs
	containing negation markers. We further propose a neural network model, AntNET,
	that integrates morphological features indicative of antonymy into a path-based
	relation detection algorithm. We demonstrate that our model outperforms
	state-of-the-art models in distinguishing antonyms from other semantic
	relations and is capable of efficiently handling multi-word expressions.},
  url       = {http://www.aclweb.org/anthology/S17-1002}
}

@InProceedings{ponti-vulic-korhonen:2017:starSEM,
  author    = {Ponti, Edoardo Maria  and  Vuli{\'c}, Ivan  and  Korhonen, Anna},
  title     = {Decoding Sentiment from Distributed Representations of Sentences},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {22--32},
  abstract  = {Distributed representations of sentences have been developed recently to
	represent their meaning as real-valued vectors. However, it is not clear how
	much information such representations retain about the polarity of sentences.
	To study this question, we decode sentiment from unsupervised sentence
	representations learned with different architectures (sensitive to the order of
	words, the order of sentences, or none) in 9 typologically diverse languages.
	Sentiment results from the (recursive) composition of lexical items and
	grammatical strategies such as negation and concession. The results are
	manifold: we show that there is no `one-size-fits-all' representation
	architecture outperforming the others across the board. Rather, the top-ranking
	architectures depend on the language at hand. Moreover, we find that in several
	cases the additive composition model based on skip-gram word vectors may
	surpass supervised state-of-art architectures such as bi-directional LSTMs.
	Finally, we provide a possible explanation of the observed variation based on
	the type of negative constructions in each language.},
  url       = {http://www.aclweb.org/anthology/S17-1003}
}

@InProceedings{vyas-carpuat:2017:starSEM,
  author    = {Vyas, Yogarshi  and  Carpuat, Marine},
  title     = {Detecting Asymmetric Semantic Relations in Context: A Case-Study on Hypernymy Detection},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {33--43},
  abstract  = {We introduce WHiC, a challenging testbed for detecting hypernymy, an asymmetric
	relation between words. While previous work has focused on detecting hypernymy
	between word types, we ground the meaning of words in specific contexts drawn
	from WordNet examples, and require predictions to be sensitive to changes in
	contexts. WHiC lets us analyze complementary properties of two approaches of
	inducing vector representations of word meaning in context. We show that such
	contextualized word representations also improve detection of a wider range of
	semantic relations in context.},
  url       = {http://www.aclweb.org/anthology/S17-1004}
}

@InProceedings{chen-sun:2017:starSEM,
  author    = {Chen, Ao  and  Sun, Maosong},
  title     = {Domain-Specific New Words Detection in {Chinese}},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {44--53},
  abstract  = {With the explosive growth of Internet, more and more domain-specific
	environments appear, such as forums, blogs, MOOCs and etc. Domain-specific
	words appear in these areas and always play a critical role in the
	domain-specific NLP tasks. This paper aims at extracting Chinese
	domain-specific new words automatically. The extraction of domain-specific new
	words has two parts including both new words in this domain and the especially
	important words. In this work, we propose a joint statistical model to perform
	these two works simultaneously. Compared to traditional new words detection
	models, our model doesn't need handcraft features which are labor intensive.
	Experimental results demonstrate that our joint model achieves a better
	performance compared with the state-of-the-art methods.},
  url       = {http://www.aclweb.org/anthology/S17-1005}
}

@InProceedings{gharbieh-bhavsar-cook:2017:starSEM,
  author    = {Gharbieh, Waseem  and  Bhavsar, Virendrakumar  and  Cook, Paul},
  title     = {Deep Learning Models For Multiword Expression Identification},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {54--64},
  abstract  = {Multiword expressions (MWEs) are lexical items that can be decomposed
	into multiple component words, but have properties that are
	unpredictable with respect to their component words. In this paper we
	propose the first deep learning models for token-level identification
	of MWEs. Specifically, we consider a layered feedforward network, a
	recurrent neural network, and convolutional neural networks. In
	experimental results we show that convolutional neural networks are
	able to outperform the previous state-of-the-art for MWE
	identification, with a convolutional neural network with three hidden
	layers giving the best performance.},
  url       = {http://www.aclweb.org/anthology/S17-1006}
}

@InProceedings{mohammad-bravomarquez:2017:starSEM,
  author    = {Mohammad, Saif  and  Bravo-Marquez, Felipe},
  title     = {Emotion Intensities in {Tweets}},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {65--77},
  abstract  = {This paper examines the task of detecting intensity of emotion from text. We
	create the first datasets of tweets annotated for anger, fear, joy, and sadness
	intensities. We use a technique called best--worst scaling (BWS) that improves
	annotation consistency and obtains reliable fine-grained scores. We show that
	emotion-word hashtags often impact emotion intensity, usually conveying a more
	intense emotion. Finally, we create a benchmark regression system and conduct
	experiments to determine: which features are useful for detecting emotion
	intensity; and, the extent to which two emotions are similar in terms of how
	they manifest in language.},
  url       = {http://www.aclweb.org/anthology/S17-1007}
}

@InProceedings{asghar-EtAl:2017:starSEM,
  author    = {Asghar, Nabiha  and  Poupart, Pascal  and  Jiang, Xin  and  Li, Hang},
  title     = {Deep Active Learning for Dialogue Generation},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {78--83},
  abstract  = {We propose an online, end-to-end, neural generative conversational model for
	open-domain dialogue. It is trained using a unique combination of offline
	two-phase supervised learning and online human-in-the-loop active learning.
	While most existing research proposes offline supervision or hand-crafted
	reward functions for online reinforcement, we devise a novel interactive
	learning mechanism based on hamming-diverse beam search for response generation
	and one-character user-feedback at each step. Experiments show that our model
	inherently promotes the generation of semantically relevant and interesting
	responses, and can be used to train agents with customized personas, moods and
	conversational styles.},
  url       = {http://www.aclweb.org/anthology/S17-1008}
}

@InProceedings{cocos-apidianaki-callisonburch:2017:starSEM,
  author    = {Cocos, Anne  and  Apidianaki, Marianna  and  Callison-Burch, Chris},
  title     = {Mapping the Paraphrase Database to {WordNet}},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {84--90},
  abstract  = {WordNet has facilitated important research in natural language processing but
	its usefulness is somewhat limited by its relatively small lexical coverage.
	The Paraphrase Database (PPDB) covers 650 times more words, but lacks the
	semantic structure of WordNet that would make it more directly useful for
	downstream tasks. We present a method for mapping words from PPDB to WordNet
	synsets with 89\% accuracy. The mapping also lays important groundwork for
	incorporating WordNet's relations into PPDB so as to increase its utility for
	semantic reasoning in applications.},
  url       = {http://www.aclweb.org/anthology/S17-1009}
}

@InProceedings{feng-EtAl:2017:starSEM,
  author    = {Feng, Yukun  and  Yu, Dong  and  Xu, Jian  and  Liu, Chunhua},
  title     = {Semantic Frame Labeling with Target-based Neural Model},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {91--96},
  abstract  = {This paper explores the automatic learning of distributed representations of
	the target's context for semantic frame labeling with target-based neural
	model. We constrain the whole sentence as the model's input without feature
	extraction from the sentence. This is different from many previous works in
	which local feature extraction of the targets is widely used. This constraint
	makes the task harder, especially with long sentences, but also makes our model
	easily applicable to a range of resources and other similar tasks. We evaluate
	our model on several resources and get the state-of-the-art result on subtask 2
	of SemEval 2015 task 15. Finally, we extend the task to word-sense
	disambiguation task and we also achieve a strong result in comparison to
	state-of-the-art work.},
  url       = {http://www.aclweb.org/anthology/S17-1010}
}

@InProceedings{ferraro-EtAl:2017:starSEM,
  author    = {Ferraro, Francis  and  Poliak, Adam  and  Cotterell, Ryan  and  Van Durme, Benjamin},
  title     = {Frame-Based Continuous Lexical Semantics through Exponential Family Tensor Factorization and Semantic Proto-Roles},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {97--103},
  abstract  = {We study how different frame annotations complement one another when learning
	continuous lexical semantics. We learn the representations from a tensorized
	skip-gram model that consistently encodes syntactic-semantic content better,
	with multiple 10\% gains over baselines.},
  url       = {http://www.aclweb.org/anthology/S17-1011}
}

@InProceedings{gupta-boleda-pado:2017:starSEM,
  author    = {Gupta, Abhijeet  and  Boleda, Gemma  and  Pad{\'o}, Sebastian},
  title     = {Distributed Prediction of Relations for Entities: The Easy, The Difficult, and The Impossible},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {104--109},
  abstract  = {Word embeddings are supposed to provide easy access to semantic relations such
	as ``male of'' (man--woman). While this claim has been investigated for
	concepts, little is known about the distributional behavior of relations of
	(Named) Entities. We describe two word embedding-based models that predict
	values for relational attributes of entities, and analyse them. The task is
	challenging, with major performance differences between relations. Contrary to
	many NLP tasks, high difficulty for a relation does not result from low
	frequency, but from (a) one-to-many mappings; and (b) lack of context patterns
	expressing the relation that are easy to pick up by word
	embeddings.},
  url       = {http://www.aclweb.org/anthology/S17-1012}
}

@InProceedings{maredia-EtAl:2017:starSEM,
  author    = {Maredia, Angel  and  Schechtman, Kara  and  Levitan, Sarah Ita  and  Hirschberg, Julia},
  title     = {Comparing Approaches for Automatic Question Identification},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {110--114},
  abstract  = {Collecting spontaneous speech corpora that are open-ended, yet topically
	constrained, is increasingly popular for research in spoken dialogue systems
	and speaker state, inter alia. Typically, these corpora are labeled by human
	annotators, either in the lab or through crowd-sourcing; however, this is
	cumbersome and time-consuming for large corpora. We present four different
	approaches to automatically tagging a corpus when general topics of the
	conversations are known. We develop these approaches on the Columbia X-Cultural
	Deception corpus and find accuracy that significantly exceeds the baseline.
	Finally, we conduct a cross-corpus evaluation by testing the best performing
	approach on the Columbia/SRI/Colorado corpus.},
  url       = {http://www.aclweb.org/anthology/S17-1013}
}

@InProceedings{medic-vsnajder-pado:2017:starSEM,
  author    = {Medi{\'c}, Zoran  and  {\v{S}}najder, Jan  and  Pad{\'o}, Sebastian},
  title     = {Does Free Word Order Hurt? Assessing the Practical Lexical Function Model for {Croatian}},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {115--120},
  abstract  = {The Practical Lexical Function (PLF) model is a model of computational
	distributional semantics that attempts to strike a balance between expressivity
	and learnability in predicting phrase meaning and shows competitive results. We
	investigate how well the PLF carries over to free word order languages, given
	that it builds on observations of predicate-argument combinations that are
	harder to recover in free word order languages. We evaluate variants of the PLF
	for Croatian, using a new lexical substitution dataset. We find that the PLF
	works about as well for Croatian as for English, but demonstrate that its
	strength lies in modeling verbs, and that the free word order affects the less
	robust PLF variant.},
  url       = {http://www.aclweb.org/anthology/S17-1014}
}

@InProceedings{nguyen-EtAl:2017:starSEM,
  author    = {Nguyen, Dai Quoc  and  Nguyen, Dat Quoc  and  Modi, Ashutosh  and  Thater, Stefan  and  Pinkal, Manfred},
  title     = {A Mixture Model for Learning Multi-Sense Word Embeddings},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {121--127},
  abstract  = {Word embeddings are now a standard technique for inducing meaning
	representations for words. For getting good representations, it is important to
	take into account different senses of a word. In this paper, we propose a
	mixture model for learning multi-sense word embeddings. Our model generalizes
	the previous works in that it allows to induce different weights of different
	senses of a word. The experimental results show that our model outperforms
	previous models on standard evaluation tasks.},
  url       = {http://www.aclweb.org/anthology/S17-1015}
}

@InProceedings{ostermann-EtAl:2017:starSEM,
  author    = {Ostermann, Simon  and  Roth, Michael  and  Thater, Stefan  and  Pinkal, Manfred},
  title     = {Aligning Script Events with Narrative Texts},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {128--134},
  abstract  = {Script knowledge plays a central role in text understanding and is relevant for
	a variety of downstream tasks. In this paper, we consider two recent datasets
	which provide a rich and general representation of script events in terms of
	paraphrase sets. We introduce the task of mapping event mentions in narrative
	texts to such script event types, and present a model for this task that
	exploits rich linguistic representations as well as information on temporal
	ordering. The results of our experiments demonstrate that this complex task is
	indeed feasible.},
  url       = {http://www.aclweb.org/anthology/S17-1016}
}

@InProceedings{rogers-drozd-li:2017:starSEM,
  author    = {Rogers, Anna  and  Drozd, Aleksandr  and  Li, Bofang},
  title     = {The (too Many) Problems of Analogical Reasoning with Word Vectors},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {135--148},
  abstract  = {This paper explores the possibilities of analogical reasoning with vector space
	models. Given two pairs of words with the same relation (e.g. man:woman ::
	king:queen), it was proposed that the offset between one pair of the
	corresponding word vectors can be used to identify the unknown member of the
	other pair (king - man + woman = queen). We argue against such ``linguistic
	regularities'' as a model for linguistic relations in vector space models and as
	a benchmark, and we show that the vector offset (as well as two other,
	better-performing methods) suffers from dependence on vector similarity.},
  url       = {http://www.aclweb.org/anthology/S17-1017}
}

@InProceedings{shutova-wundsam-yannakoudakis:2017:starSEM,
  author    = {Shutova, Ekaterina  and  Wundsam, Andreas  and  Yannakoudakis, Helen},
  title     = {Semantic Frames and Visual Scenes: Learning Semantic Role Inventories from Image and Video Descriptions},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {149--154},
  abstract  = {Frame-semantic parsing and semantic role labelling, that aim to automatically
	assign semantic roles to arguments of verbs in a sentence, have become an
	active strand of research in NLP. However, to date these methods have relied on
	a predefined inventory of semantic roles. In this paper, we present a method to
	automatically learn argument role inventories for verbs from large corpora of
	text, images and videos. We evaluate the method against manually constructed
	role inventories in FrameNet and show that the visual model outperforms the
	language-only model and operates with a high precision.},
  url       = {http://www.aclweb.org/anthology/S17-1018}
}

@InProceedings{shwartz-stanovsky-dagan:2017:starSEM,
  author    = {Shwartz, Vered  and  Stanovsky, Gabriel  and  Dagan, Ido},
  title     = {Acquiring Predicate Paraphrases from News {Tweets}},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {155--160},
  abstract  = {We present a simple method for ever-growing extraction of predicate paraphrases
	from news headlines in Twitter. Analysis of the output of ten weeks of
	collection shows that the accuracy of paraphrases with different support levels
	is estimated between 60--86\%. We also demonstrate that our resource is to a
	large extent complementary to existing resources, providing many novel
	paraphrases. Our resource is publicly available, continuously expanding based
	on daily news.},
  url       = {http://www.aclweb.org/anthology/S17-1019}
}

@InProceedings{talmor-geva-berant:2017:starSEM,
  author    = {Talmor, Alon  and  Geva, Mor  and  Berant, Jonathan},
  title     = {Evaluating Semantic Parsing against a Simple Web-based Question Answering Model},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {161--167},
  abstract  = {Semantic parsing shines at analyzing complex natural language that involves
	composition and computation over multiple pieces of evidence. However, datasets
	for semantic parsing contain many factoid questions that can be answered from a
	single web document. In this paper, we propose to evaluate semantic
	parsing-based question answering models by comparing them to a question
	answering baseline that queries the web and extracts the answer only from web
	snippets, without access to the target knowledge-base. We investigate this
	approach on COMPLEXQUESTIONS, a dataset designed to focus on compositional
	language, and find that our model obtains reasonable performance ($\sim$35 F1
	compared to 41 F1 of state-of-the-art). We find in our analysis that our model
	performs well on complex questions involving conjunctions, but struggles on
	questions that involve relation composition and superlatives.},
  url       = {http://www.aclweb.org/anthology/S17-1020}
}

@InProceedings{chersoni-lenci-blache:2017:starSEM,
  author    = {Chersoni, Emmanuele  and  Lenci, Alessandro  and  Blache, Philippe},
  title     = {Logical Metonymy in a Distributional Model of Sentence Comprehension},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {168--177},
  abstract  = {In theoretical linguistics, logical metonymy is defined as the combination of
	an event-subcategorizing verb with an entity-denoting direct object (e.g., The
	author began the book), so that the interpretation of the VP requires the
	retrieval of a covert event (e.g., writing). Psycholinguistic studies have
	revealed extra processing costs for logical metonymy, a phenomenon generally
	explained with the introduction of new semantic structure.
	In this paper, we present a general distributional model for sentence
	comprehension inspired by the Memory, Unification and Control model by Hagoort
	(2013,2016). We show that our distributional framework can account for the
	extra processing costs of logical metonymy and can identify the covert event in
	a classification task.},
  url       = {http://www.aclweb.org/anthology/S17-1021}
}

@InProceedings{hwang-EtAl:2017:starSEM,
  author    = {Hwang, Jena D.  and  Bhatia, Archna  and  Han, Na-Rae  and  O'Gorman, Tim  and  Srikumar, Vivek  and  Schneider, Nathan},
  title     = {Double Trouble: The Problem of Construal in Semantic Annotation of Adpositions},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {178--188},
  abstract  = {We consider the semantics of prepositions, revisiting a broad-coverage
	annotation scheme used for annotating all 4,250 preposition tokens in a 55,000
	word corpus of English. Attempts to apply the scheme to adpositions and case
	markers in other languages, as well as some problematic cases in English, have
	led us to reconsider the assumption that an adposition's lexical contribution
	is equivalent to the role/relation that it mediates. Our proposal is to embrace
	the potential for construal in adposition use, expressing such phenomena
	directly at the token level to manage complexity and avoid sense proliferation.
	We suggest a framework to represent both the scene role and the adposition's
	lexical function so they can be annotated at scale---supporting automatic,
	statistical processing of domain-general language---and discuss how this
	representation would allow for a simpler inventory of labels.},
  url       = {http://www.aclweb.org/anthology/S17-1022}
}

@InProceedings{kiss-EtAl:2017:starSEM,
  author    = {Kiss, Tibor  and  Pelletier, Francis Jeffry  and  Husic, Halima  and  Poppek, Johanna},
  title     = {Issues of Mass and Count: Dealing with {`Dual-Life'} Nouns},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {189--198},
  abstract  = {The topics of mass and count have been studied for many decades in philosophy
	(e.g., Quine, 1960; Pelletier, 1975), linguistics (e.g., McCawley, 1975; Allen,
	1980; Krifka, 1991) and psychology (e.g., Middleton et al, 2004; Barner et al,
	2009). More recently, interest from within computational linguistics has
	studied the issues involved (e.g., Pustejovsky, 1991; Bond, 2005; Schmidtke \&
	Kuperman, 2016), to name just a few.  As is pointed out in these works, there
	are many difficult conceptual issues involved in the study of this contrast. In
	this article we study one of these issues -- the ``Dual-Life'' of being
	simultaneously +mass and +count -- by means of an unusual combination of human
	annotation, online lexical resources, and online corpora.},
  url       = {http://www.aclweb.org/anthology/S17-1023}
}

@InProceedings{gilroy-lopez-maneth:2017:starSEM,
  author    = {Gilroy, Sorcha  and  Lopez, Adam  and  Maneth, Sebastian},
  title     = {Parsing Graphs with Regular Graph Grammars},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {199--208},
  abstract  = {Recently, several datasets have become available which represent natural
	language phenomena as graphs. Hyperedge Replacement Languages (HRL) have been
	the focus of much attention as a formalism to represent the graphs in these
	datasets. Chiang et al. (2013) prove that HRL graphs can be parsed in
	polynomial time with respect to the size of the input graph. We believe that
	HRL are more expressive than is necessary to represent semantic graphs and we
	propose the use of Regular Graph Languages (RGL; Courcelle 1991), which is a
	subfamily of HRL, as a possible alternative. We provide a top-down parsing
	algorithm for RGL that runs in time linear in the size of the input graph.},
  url       = {http://www.aclweb.org/anthology/S17-1024}
}

@InProceedings{jauhar-hovy:2017:starSEM,
  author    = {Jauhar, Sujay Kumar  and  Hovy, Eduard},
  title     = {Embedded Semantic Lexicon Induction with Joint Global and Local Optimization},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {209--219},
  abstract  = {Creating annotated frame lexicons such as PropBank and FrameNet is expensive
	and labor intensive. We present a method to induce an embedded frame lexicon in
	an minimally supervised fashion using nothing more than unlabeled
	predicate-argument word pairs. We hypothesize that aggregating such pair
	selectional preferences across training leads us to a global understanding that
	captures predicate-argument frame structure. Our approach revolves around a
	novel integration between a predictive embedding model and an Indian Buffet
	Process posterior regularizer. We show, through our experimental evaluation,
	that we outperform baselines on two tasks and can learn an embedded frame
	lexicon that is able to capture some interesting generalities in relation to
	hand-crafted semantic frames.},
  url       = {http://www.aclweb.org/anthology/S17-1025}
}

@InProceedings{eichler-EtAl:2017:starSEM,
  author    = {Eichler, Kathrin  and  Xu, Feiyu  and  Uszkoreit, Hans  and  Krause, Sebastian},
  title     = {Generating Pattern-Based Entailment Graphs for Relation Extraction},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {220--229},
  abstract  = {Relation extraction is the task of recognizing and extracting relations between
	entities or concepts in texts. A common approach is to exploit existing
	knowledge to learn linguistic patterns expressing the target relation and use
	these patterns for extracting new relation mentions. Deriving relation patterns
	automatically usually results in large numbers of candidates, which need to be
	filtered to derive a subset of patterns that reliably extract correct relation
	mentions. We address the pattern selection task by exploiting the knowledge
	represented by entailment graphs, which capture semantic relationships holding
	among the learned pattern candidates. This is motivated by the fact that a
	pattern may not express the target relation explicitly, but still be useful for
	extracting instances for which the relation holds, because its meaning entails
	the meaning of the target relation. We evaluate the usage of both automatically
	generated and gold-standard entailment graphs in a relation extraction scenario
	and present favorable experimental results, exhibiting the benefits of
	structuring and selecting patterns based on entailment graphs.},
  url       = {http://www.aclweb.org/anthology/S17-1026}
}

@InProceedings{becker-EtAl:2017:starSEM,
  author    = {Becker, Maria  and  Staniek, Michael  and  Nastase, Vivi  and  Palmer, Alexis  and  Frank, Anette},
  title     = {Classifying Semantic Clause Types: Modeling Context and Genre Characteristics with Recurrent Neural Networks and Attention},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {230--240},
  abstract  = {Detecting aspectual properties of clauses in the form of situation entity types
	has been shown to depend on a combination of syntactic-semantic and contextual
	features. We explore this task in a deep-learning framework, where tuned word
	representations capture lexical, syntactic and semantic features. We introduce
	an attention mechanism that pinpoints relevant context not only for the current
	instance, but also for the larger context. Apart from implicitly capturing task
	relevant features, the advantage of our neural model is that it avoids the need
	to reproduce linguistic features for other languages and is thus more easily
	transferable. We present experiments for English and German that achieve
	competitive performance. We present a novel take on modeling and exploiting
	genre information and showcase the adaptation of our system from one language
	to another.},
  url       = {http://www.aclweb.org/anthology/S17-1027}
}

@InProceedings{sarioglukayi-EtAl:2017:starSEM,
  author    = {Sarioglu Kayi, Efsun  and  Diab, Mona  and  Pauselli, Luca  and  Compton, Michael  and  Coppersmith, Glen},
  title     = {Predictive Linguistic Features of Schizophrenia},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {241--250},
  abstract  = {Schizophrenia is one of the most disabling and difficult to treat of all human
	medical/health conditions, ranking in the top ten causes of disability
	worldwide. It has been a puzzle in part due to difficulty in identifying its
	basic, fundamental components. Several studies have shown that some
	manifestations of schizophrenia (e.g., the negative symptoms that include
	blunting of speech prosody, as well as the disorganization symptoms that lead
	to disordered language) can be understood from the perspective of linguistics.
	However, schizophrenia research has not kept pace with technologies in
	computational linguistics, especially in semantics and pragmatics. As such, we
	examine the writings of schizophrenia patients analyzing their syntax,
	semantics and pragmatics. In addition, we analyze tweets of (self proclaimed)
	schizophrenia patients who publicly discuss their diagnoses. For writing
	samples dataset, syntactic features are found to be the most successful in
	classification whereas for the less structured Twitter dataset, a combination
	of features performed the best.},
  url       = {http://www.aclweb.org/anthology/S17-1028}
}

@InProceedings{sachan-xing:2017:starSEM,
  author    = {Sachan, Mrinmaya  and  Xing, Eric},
  title     = {Learning to Solve Geometry Problems from Natural Language Demonstrations in Textbooks},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {251--261},
  abstract  = {Humans as well as animals are good at imitation. Inspired by this, the learning
	by demonstration view of machine learning learns to perform a task from
	detailed example demonstrations. In this paper, we introduce the task of
	question answering using natural language demonstrations where the question
	answering system is provided with detailed demonstrative solutions to questions
	in natural language. As a case study, we explore the task of learning to solve
	geometry problems using demonstrative solutions available in textbooks. We
	collect a new dataset of demonstrative geometry solutions from textbooks and
	explore approaches that learn to interpret these demonstrations as well as to
	use these interpretations to solve geometry problems. Our approaches show
	improvements over the best previously published system for solving geometry
	problems.},
  url       = {http://www.aclweb.org/anthology/S17-1029}
}

@InProceedings{antoniorodrigues-EtAl:2017:starSEM,
  author    = {Ant\'{o}nio Rodrigues, Jo\~{a}o  and  Saedi, Chakaveh  and  Maraev, Vladislav  and  Silva, Jo\~{a}o  and  Branco, Ant\'{o}nio},
  title     = {Ways of Asking and Replying in Duplicate Question Detection},
  booktitle = {Proceedings of the 6th Joint Conference on Lexical and Computational Semantics (*SEM 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {262--270},
  abstract  = {This paper presents the results of systematic experimentation on the impact in
	duplicate question detection of different types of questions across both a
	number of established approaches and a novel, superior one used to address this
	language processing task. This study permits to gain a novel insight on the
	different levels of robustness of the diverse detection methods with respect to
	different conditions of their application, including the ones that approximate
	real usage scenarios.},
  url       = {http://www.aclweb.org/anthology/S17-1030}
}

