BibTeX
@inproceedings{kordjamshidi-etal-2017-spatial,
title = "Spatial Language Understanding with Multimodal Graphs using Declarative Learning based Programming",
author = "Kordjamshidi, Parisa and
Rahgooy, Taher and
Manzoor, Umar",
editor = "Chang, Kai-Wei and
Chang, Ming-Wei and
Srikumar, Vivek and
Rush, Alexander M.",
booktitle = "Proceedings of the 2nd Workshop on Structured Prediction for Natural Language Processing",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4306",
doi = "10.18653/v1/W17-4306",
pages = "33--43",
abstract = "This work is on a previously formalized semantic evaluation task of spatial role labeling (SpRL) that aims at extraction of formal spatial meaning from text. Here, we report the results of initial efforts towards exploiting visual information in the form of images to help spatial language understanding. We discuss the way of designing new models in the framework of declarative learning-based programming (DeLBP). The DeLBP framework facilitates combining modalities and representing various data in a unified graph. The learning and inference models exploit the structure of the unified graph as well as the global first order domain constraints beyond the data to predict the semantics which forms a structured meaning representation of the spatial context. Continuous representations are used to relate the various elements of the graph originating from different modalities. We improved over the state-of-the-art results on SpRL.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kordjamshidi-etal-2017-spatial">
<titleInfo>
<title>Spatial Language Understanding with Multimodal Graphs using Declarative Learning based Programming</title>
</titleInfo>
<name type="personal">
<namePart type="given">Parisa</namePart>
<namePart type="family">Kordjamshidi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Taher</namePart>
<namePart type="family">Rahgooy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Umar</namePart>
<namePart type="family">Manzoor</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2017-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2nd Workshop on Structured Prediction for Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kai-Wei</namePart>
<namePart type="family">Chang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ming-Wei</namePart>
<namePart type="family">Chang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vivek</namePart>
<namePart type="family">Srikumar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alexander</namePart>
<namePart type="given">M</namePart>
<namePart type="family">Rush</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Copenhagen, Denmark</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This work is on a previously formalized semantic evaluation task of spatial role labeling (SpRL) that aims at extraction of formal spatial meaning from text. Here, we report the results of initial efforts towards exploiting visual information in the form of images to help spatial language understanding. We discuss the way of designing new models in the framework of declarative learning-based programming (DeLBP). The DeLBP framework facilitates combining modalities and representing various data in a unified graph. The learning and inference models exploit the structure of the unified graph as well as the global first order domain constraints beyond the data to predict the semantics which forms a structured meaning representation of the spatial context. Continuous representations are used to relate the various elements of the graph originating from different modalities. We improved over the state-of-the-art results on SpRL.</abstract>
<identifier type="citekey">kordjamshidi-etal-2017-spatial</identifier>
<identifier type="doi">10.18653/v1/W17-4306</identifier>
<location>
<url>https://aclanthology.org/W17-4306</url>
</location>
<part>
<date>2017-09</date>
<extent unit="page">
<start>33</start>
<end>43</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Spatial Language Understanding with Multimodal Graphs using Declarative Learning based Programming
%A Kordjamshidi, Parisa
%A Rahgooy, Taher
%A Manzoor, Umar
%Y Chang, Kai-Wei
%Y Chang, Ming-Wei
%Y Srikumar, Vivek
%Y Rush, Alexander M.
%S Proceedings of the 2nd Workshop on Structured Prediction for Natural Language Processing
%D 2017
%8 September
%I Association for Computational Linguistics
%C Copenhagen, Denmark
%F kordjamshidi-etal-2017-spatial
%X This work is on a previously formalized semantic evaluation task of spatial role labeling (SpRL) that aims at extraction of formal spatial meaning from text. Here, we report the results of initial efforts towards exploiting visual information in the form of images to help spatial language understanding. We discuss the way of designing new models in the framework of declarative learning-based programming (DeLBP). The DeLBP framework facilitates combining modalities and representing various data in a unified graph. The learning and inference models exploit the structure of the unified graph as well as the global first order domain constraints beyond the data to predict the semantics which forms a structured meaning representation of the spatial context. Continuous representations are used to relate the various elements of the graph originating from different modalities. We improved over the state-of-the-art results on SpRL.
%R 10.18653/v1/W17-4306
%U https://aclanthology.org/W17-4306
%U https://doi.org/10.18653/v1/W17-4306
%P 33-43
Markdown (Informal)
[Spatial Language Understanding with Multimodal Graphs using Declarative Learning based Programming](https://aclanthology.org/W17-4306) (Kordjamshidi et al., 2017)
ACL
Parisa Kordjamshidi, Taher Rahgooy, and Umar Manzoor. 2017. Spatial Language Understanding with Multimodal Graphs using Declarative Learning based Programming. In Proceedings of the 2nd Workshop on Structured Prediction for Natural Language Processing, pages 33–43, Copenhagen, Denmark. Association for Computational Linguistics.