BibTeX
@inproceedings{ghanimifard-dobnik-2019-neural,
    title = "What a neural language model tells us about spatial relations",
    author = "Ghanimifard, Mehdi and
      Dobnik, Simon",
    editor = "Bhatia, Archna and
      Bisk, Yonatan and
      Kordjamshidi, Parisa and
      Thomason, Jesse",
    booktitle = "Proceedings of the Combined Workshop on Spatial Language Understanding ({S}p{LU}) and Grounded Communication for Robotics ({R}obo{NLP})",
    month = jun,
    year = "2019",
    address = "Minneapolis, Minnesota",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W19-1608",
    doi = "10.18653/v1/W19-1608",
    pages = "71--81",
    abstract = "Understanding and generating spatial descriptions requires knowledge about what objects are related, their functional interactions, and where the objects are geometrically located. Different spatial relations have different functional and geometric bias. The wide usage of neural language models in different areas including generation of image description motivates the study of what kind of knowledge is encoded in neural language models about individual spatial relations. With the premise that the functional bias of relations is expressed in their word distributions, we construct multi-word distributional vector representations and show that these representations perform well on intrinsic semantic reasoning tasks, thus confirming our premise. A comparison of our vector representations to human semantic judgments indicates that different bias (functional or geometric) is captured in different data collection tasks which suggests that the contribution of the two meaning modalities is dynamic, related to the context of the task.",
}

MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ghanimifard-dobnik-2019-neural">
  <titleInfo>
    <title>What a neural language model tells us about spatial relations</title>
  </titleInfo>
  <name type="personal">
    <namePart type="given">Mehdi</namePart>
    <namePart type="family">Ghanimifard</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <name type="personal">
    <namePart type="given">Simon</namePart>
    <namePart type="family">Dobnik</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <originInfo>
    <dateIssued>2019-06</dateIssued>
  </originInfo>
  <typeOfResource>text</typeOfResource>
  <relatedItem type="host">
    <titleInfo>
      <title>Proceedings of the Combined Workshop on Spatial Language Understanding (SpLU) and Grounded Communication for Robotics (RoboNLP)</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Archna</namePart>
      <namePart type="family">Bhatia</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yonatan</namePart>
      <namePart type="family">Bisk</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Parisa</namePart>
      <namePart type="family">Kordjamshidi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jesse</namePart>
      <namePart type="family">Thomason</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <originInfo>
      <publisher>Association for Computational Linguistics</publisher>
      <place>
        <placeTerm type="text">Minneapolis, Minnesota</placeTerm>
      </place>
    </originInfo>
    <genre authority="marcgt">conference publication</genre>
  </relatedItem>
  <abstract>Understanding and generating spatial descriptions requires knowledge about what objects are related, their functional interactions, and where the objects are geometrically located. Different spatial relations have different functional and geometric bias. The wide usage of neural language models in different areas including generation of image description motivates the study of what kind of knowledge is encoded in neural language models about individual spatial relations. With the premise that the functional bias of relations is expressed in their word distributions, we construct multi-word distributional vector representations and show that these representations perform well on intrinsic semantic reasoning tasks, thus confirming our premise. A comparison of our vector representations to human semantic judgments indicates that different bias (functional or geometric) is captured in different data collection tasks which suggests that the contribution of the two meaning modalities is dynamic, related to the context of the task.</abstract>
  <identifier type="citekey">ghanimifard-dobnik-2019-neural</identifier>
  <identifier type="doi">10.18653/v1/W19-1608</identifier>
  <location>
    <url>https://aclanthology.org/W19-1608</url>
  </location>
  <part>
    <date>2019-06</date>
    <extent unit="page">
      <start>71</start>
      <end>81</end>
    </extent>
  </part>
</mods>
</modsCollection>

Endnote
%0 Conference Proceedings
%T What a neural language model tells us about spatial relations
%A Ghanimifard, Mehdi
%A Dobnik, Simon
%Y Bhatia, Archna
%Y Bisk, Yonatan
%Y Kordjamshidi, Parisa
%Y Thomason, Jesse
%S Proceedings of the Combined Workshop on Spatial Language Understanding (SpLU) and Grounded Communication for Robotics (RoboNLP)
%D 2019
%8 June
%I Association for Computational Linguistics
%C Minneapolis, Minnesota
%F ghanimifard-dobnik-2019-neural
%X Understanding and generating spatial descriptions requires knowledge about what objects are related, their functional interactions, and where the objects are geometrically located. Different spatial relations have different functional and geometric bias. The wide usage of neural language models in different areas including generation of image description motivates the study of what kind of knowledge is encoded in neural language models about individual spatial relations. With the premise that the functional bias of relations is expressed in their word distributions, we construct multi-word distributional vector representations and show that these representations perform well on intrinsic semantic reasoning tasks, thus confirming our premise. A comparison of our vector representations to human semantic judgments indicates that different bias (functional or geometric) is captured in different data collection tasks which suggests that the contribution of the two meaning modalities is dynamic, related to the context of the task.
%R 10.18653/v1/W19-1608
%U https://aclanthology.org/W19-1608
%U https://doi.org/10.18653/v1/W19-1608
%P 71-81

Markdown (Informal)
[What a neural language model tells us about spatial relations](https://aclanthology.org/W19-1608) (Ghanimifard & Dobnik, RoboNLP 2019)

ACL
Mehdi Ghanimifard and Simon Dobnik. 2019. What a neural language model tells us about spatial relations. In Proceedings of the Combined Workshop on Spatial Language Understanding (SpLU) and Grounded Communication for Robotics (RoboNLP), pages 71–81, Minneapolis, Minnesota. Association for Computational Linguistics.