@inproceedings{guan-etal-2025-use,
title = "Should We Use a Fixed Embedding Size? Customized Dimension Sizes for Knowledge Graph Embedding",
author = "Guan, Zhanpeng and
Zhang, Zhao and
Wu, Yiqing and
Zhang, Fuwei and
Xu, Yongjun",
editor = "Rambow, Owen and
Wanner, Leo and
Apidianaki, Marianna and
Al-Khalifa, Hend and
Eugenio, Barbara Di and
Schockaert, Steven",
booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.coling-main.604/",
pages = "9006--9012",
abstract = "Knowledge Graph Embedding (KGE) aims to project entities and relations into a low-dimensional space, so as to enable Knowledge Graphs (KGs) to be effectively used by downstream AI tasks. Most existing KGs (e.g. Wikidata) suffer from the data imbalance issue, i.e., the occurrence frequencies vary significantly among different entities. Current KGE models use a fixed embedding size, leading to overfitting for low-frequency entities and underfitting for high-frequency ones. A simple method is to manually set embedding sizes based on frequency, but this is not feasible due to the complexity and the large number of entities. To this end, we propose CustomizE, which customizes embedding sizes in a data-driven way, assigning larger sizes for high-frequency entities and smaller sizes for low-frequency ones. We use bilevel optimization for stable learning of representations and sizes. It is noteworthy that our framework is universal and flexible, which is suitable for various KGE models. Experiments on link prediction tasks show its superiority over state-of-the-art baselines."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="guan-etal-2025-use">
<titleInfo>
<title>Should We Use a Fixed Embedding Size? Customized Dimension Sizes for Knowledge Graph Embedding</title>
</titleInfo>
<name type="personal">
<namePart type="given">Zhanpeng</namePart>
<namePart type="family">Guan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhao</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yiqing</namePart>
<namePart type="family">Wu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Fuwei</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yongjun</namePart>
<namePart type="family">Xu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-01</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 31st International Conference on Computational Linguistics</title>
</titleInfo>
<name type="personal">
<namePart type="given">Owen</namePart>
<namePart type="family">Rambow</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Leo</namePart>
<namePart type="family">Wanner</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marianna</namePart>
<namePart type="family">Apidianaki</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hend</namePart>
<namePart type="family">Al-Khalifa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Barbara</namePart>
<namePart type="given">Di</namePart>
<namePart type="family">Eugenio</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Steven</namePart>
<namePart type="family">Schockaert</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, UAE</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Knowledge Graph Embedding (KGE) aims to project entities and relations into a low-dimensional space, so that Knowledge Graphs (KGs) can be effectively used by downstream AI tasks. Most existing KGs (e.g., Wikidata) suffer from a data imbalance issue, i.e., occurrence frequencies vary significantly across entities. Current KGE models use a fixed embedding size, leading to overfitting for low-frequency entities and underfitting for high-frequency ones. A simple remedy is to manually set embedding sizes based on frequency, but this is infeasible given the complexity and the large number of entities. To this end, we propose CustomizE, which customizes embedding sizes in a data-driven way, assigning larger sizes to high-frequency entities and smaller sizes to low-frequency ones. We use bilevel optimization for stable learning of representations and sizes. Notably, our framework is universal and flexible, making it suitable for various KGE models. Experiments on link prediction tasks show its superiority over state-of-the-art baselines.</abstract>
<identifier type="citekey">guan-etal-2025-use</identifier>
<location>
<url>https://aclanthology.org/2025.coling-main.604/</url>
</location>
<part>
<date>2025-01</date>
<extent unit="page">
<start>9006</start>
<end>9012</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Should We Use a Fixed Embedding Size? Customized Dimension Sizes for Knowledge Graph Embedding
%A Guan, Zhanpeng
%A Zhang, Zhao
%A Wu, Yiqing
%A Zhang, Fuwei
%A Xu, Yongjun
%Y Rambow, Owen
%Y Wanner, Leo
%Y Apidianaki, Marianna
%Y Al-Khalifa, Hend
%Y Eugenio, Barbara Di
%Y Schockaert, Steven
%S Proceedings of the 31st International Conference on Computational Linguistics
%D 2025
%8 January
%I Association for Computational Linguistics
%C Abu Dhabi, UAE
%F guan-etal-2025-use
%X Knowledge Graph Embedding (KGE) aims to project entities and relations into a low-dimensional space, so that Knowledge Graphs (KGs) can be effectively used by downstream AI tasks. Most existing KGs (e.g., Wikidata) suffer from a data imbalance issue, i.e., occurrence frequencies vary significantly across entities. Current KGE models use a fixed embedding size, leading to overfitting for low-frequency entities and underfitting for high-frequency ones. A simple remedy is to manually set embedding sizes based on frequency, but this is infeasible given the complexity and the large number of entities. To this end, we propose CustomizE, which customizes embedding sizes in a data-driven way, assigning larger sizes to high-frequency entities and smaller sizes to low-frequency ones. We use bilevel optimization for stable learning of representations and sizes. Notably, our framework is universal and flexible, making it suitable for various KGE models. Experiments on link prediction tasks show its superiority over state-of-the-art baselines.
%U https://aclanthology.org/2025.coling-main.604/
%P 9006-9012
Markdown (Informal)
[Should We Use a Fixed Embedding Size? Customized Dimension Sizes for Knowledge Graph Embedding](https://aclanthology.org/2025.coling-main.604/) (Guan et al., COLING 2025)
ACL
Zhanpeng Guan, Zhao Zhang, Yiqing Wu, Fuwei Zhang, and Yongjun Xu. 2025. Should We Use a Fixed Embedding Size? Customized Dimension Sizes for Knowledge Graph Embedding. In Proceedings of the 31st International Conference on Computational Linguistics, pages 9006–9012, Abu Dhabi, UAE. Association for Computational Linguistics.
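
The abstract describes the core idea of frequency-dependent embedding sizes only at a high level. The Python/PyTorch sketch below is an illustrative toy, not the paper's CustomizE implementation: it assigns entity embedding sizes from hand-picked frequency buckets and projects them into a shared dimension for a TransE-style score, whereas the paper learns the sizes in a data-driven way via bilevel optimization. All names, thresholds, and sizes here are assumptions for illustration.

# Minimal sketch (assumptions only, not the paper's CustomizE method):
# high-frequency entities get larger embeddings, low-frequency ones get
# smaller embeddings, and every entity is projected into a shared space
# so a standard KGE scoring function can consume the vectors.
import torch
import torch.nn as nn


class FrequencyBucketedEntityEmbedding(nn.Module):
    def __init__(self, entity_freqs, d_model=200,
                 buckets=((1000, 200), (100, 100), (0, 32))):
        # entity_freqs: entity_freqs[i] is the occurrence count of entity i
        # buckets: (min_frequency, embedding_size) pairs, checked in order
        super().__init__()
        self.d_model = d_model
        sizes = []
        for f in entity_freqs:
            for min_freq, dim in buckets:
                if f >= min_freq:
                    sizes.append(dim)
                    break
        self.sizes = sizes
        # one parameter vector per entity, with its own (customized) size
        self.entity_vecs = nn.ParameterList(
            [nn.Parameter(torch.randn(d) * 0.1) for d in sizes]
        )
        # one projection per distinct size, mapping into the shared d_model space
        self.proj = nn.ModuleDict(
            {str(d): nn.Linear(d, d_model, bias=False) for d in set(sizes)}
        )

    def forward(self, entity_ids):
        # project each entity's customized-size vector into the shared space
        out = [self.proj[str(self.sizes[i])](self.entity_vecs[i])
               for i in entity_ids.tolist()]
        return torch.stack(out, dim=0)


if __name__ == "__main__":
    freqs = [5000, 300, 12, 1]              # toy occurrence counts for 4 entities
    emb = FrequencyBucketedEntityEmbedding(freqs, d_model=50)
    rel = nn.Embedding(3, 50)               # relations keep a fixed size here
    h = emb(torch.tensor([0, 3]))
    r = rel(torch.tensor([1, 2]))
    t = emb(torch.tensor([1, 2]))
    score = -(h + r - t).norm(p=1, dim=-1)  # TransE-style plausibility score
    print(emb.sizes, score.shape)           # [200, 100, 32, 32] torch.Size([2])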