@inproceedings{alshaikh-etal-2019-learning,
title = "Learning Conceptual Spaces with Disentangled Facets",
author = "Alshaikh, Rana and
Bouraoui, Zied and
Schockaert, Steven",
editor = "Bansal, Mohit and
Villavicencio, Aline",
booktitle = "Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)",
month = nov,
year = "2019",
address = "Hong Kong, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/K19-1013",
doi = "10.18653/v1/K19-1013",
pages = "131--139",
abstract = "Conceptual spaces are geometric representations of meaning that were proposed by G ̈ardenfors (2000). They share many similarities with the vector space embeddings that are commonly used in natural language processing. However, rather than representing entities in a single vector space, conceptual spaces are usually decomposed into several facets, each of which is then modelled as a relatively low dimensional vector space. Unfortunately, the problem of learning such conceptual spaces has thus far only received limited attention. To address this gap, we analyze how, and to what extent, a given vector space embedding can be decomposed into meaningful facets in an unsupervised fashion. While this problem is highly challenging, we show that useful facets can be discovered by relying on word embeddings to group semantically related features.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="alshaikh-etal-2019-learning">
<titleInfo>
<title>Learning Conceptual Spaces with Disentangled Facets</title>
</titleInfo>
<name type="personal">
<namePart type="given">Rana</namePart>
<namePart type="family">Alshaikh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zied</namePart>
<namePart type="family">Bouraoui</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Steven</namePart>
<namePart type="family">Schockaert</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2019-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Mohit</namePart>
<namePart type="family">Bansal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aline</namePart>
<namePart type="family">Villavicencio</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Hong Kong, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Conceptual spaces are geometric representations of meaning that were proposed by Gärdenfors (2000). They share many similarities with the vector space embeddings that are commonly used in natural language processing. However, rather than representing entities in a single vector space, conceptual spaces are usually decomposed into several facets, each of which is then modelled as a relatively low dimensional vector space. Unfortunately, the problem of learning such conceptual spaces has thus far only received limited attention. To address this gap, we analyze how, and to what extent, a given vector space embedding can be decomposed into meaningful facets in an unsupervised fashion. While this problem is highly challenging, we show that useful facets can be discovered by relying on word embeddings to group semantically related features.</abstract>
<identifier type="citekey">alshaikh-etal-2019-learning</identifier>
<identifier type="doi">10.18653/v1/K19-1013</identifier>
<location>
<url>https://aclanthology.org/K19-1013</url>
</location>
<part>
<date>2019-11</date>
<extent unit="page">
<start>131</start>
<end>139</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Learning Conceptual Spaces with Disentangled Facets
%A Alshaikh, Rana
%A Bouraoui, Zied
%A Schockaert, Steven
%Y Bansal, Mohit
%Y Villavicencio, Aline
%S Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)
%D 2019
%8 November
%I Association for Computational Linguistics
%C Hong Kong, China
%F alshaikh-etal-2019-learning
%X Conceptual spaces are geometric representations of meaning that were proposed by Gärdenfors (2000). They share many similarities with the vector space embeddings that are commonly used in natural language processing. However, rather than representing entities in a single vector space, conceptual spaces are usually decomposed into several facets, each of which is then modelled as a relatively low dimensional vector space. Unfortunately, the problem of learning such conceptual spaces has thus far only received limited attention. To address this gap, we analyze how, and to what extent, a given vector space embedding can be decomposed into meaningful facets in an unsupervised fashion. While this problem is highly challenging, we show that useful facets can be discovered by relying on word embeddings to group semantically related features.
%R 10.18653/v1/K19-1013
%U https://aclanthology.org/K19-1013
%U https://doi.org/10.18653/v1/K19-1013
%P 131-139
Markdown (Informal)
[Learning Conceptual Spaces with Disentangled Facets](https://aclanthology.org/K19-1013) (Alshaikh et al., CoNLL 2019)
ACL
Rana Alshaikh, Zied Bouraoui, and Steven Schockaert. 2019. Learning Conceptual Spaces with Disentangled Facets. In Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL), pages 131–139, Hong Kong, China. Association for Computational Linguistics.