@inproceedings{wan-etal-2025-digest,
title = "Digest the Knowledge: Large Language Models empowered Message Passing for Knowledge Graph Question Answering",
author = "Wan, Junhong and
Yu, Tao and
Jiang, Kunyu and
Fu, Yao and
Jiang, Weihao and
Zhu, Jiang",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.acl-long.750/",
doi = "10.18653/v1/2025.acl-long.750",
pages = "15426--15442",
ISBN = "979-8-89176-251-0",
abstract = "Despite their success, large language models (LLMs) suffer from notorious hallucination issue. By introducing external knowledge stored in knowledge graphs (KGs), existing methods use paths as the medium to represent the graph information that send into LLMs. However, paths only contain limited graph structure information and are unorganized with redundant sequentially appeared keywords, which are difficult for LLMs to digest. We aim to find a suitable medium that captures the essence of structure knowledge in KGs. Inspired by the Neural Message Passing in Graph Neural Networks, we propose Language Message Passing (LMP) that first learns a concise facts graph by iteratively aggregates neighbor entities and transforms them into semantic facts, and then we performs Topological Readout that encodes the graph structure information into multi-level lists of texts to augment LLMs. Our method serves as a brand-new innovative framework that brings a new perspective into KG-enhanced LLMs, and also offers human-level semantic explainability with significant performance improvements over existing methods on all 5 knowledge graph question answering datasets. Code is available at https://github.com/wanjunhong0/LMP."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="wan-etal-2025-digest">
<titleInfo>
<title>Digest the Knowledge: Large Language Models empowered Message Passing for Knowledge Graph Question Answering</title>
</titleInfo>
<name type="personal">
<namePart type="given">Junhong</namePart>
<namePart type="family">Wan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tao</namePart>
<namePart type="family">Yu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kunyu</namePart>
<namePart type="family">Jiang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yao</namePart>
<namePart type="family">Fu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Weihao</namePart>
<namePart type="family">Jiang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jiang</namePart>
<namePart type="family">Zhu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Wanxiang</namePart>
<namePart type="family">Che</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Joyce</namePart>
<namePart type="family">Nabende</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ekaterina</namePart>
<namePart type="family">Shutova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohammad</namePart>
<namePart type="given">Taher</namePart>
<namePart type="family">Pilehvar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-251-0</identifier>
</relatedItem>
<abstract>Despite their success, large language models (LLMs) suffer from notorious hallucination issue. By introducing external knowledge stored in knowledge graphs (KGs), existing methods use paths as the medium to represent the graph information that send into LLMs. However, paths only contain limited graph structure information and are unorganized with redundant sequentially appeared keywords, which are difficult for LLMs to digest. We aim to find a suitable medium that captures the essence of structure knowledge in KGs. Inspired by the Neural Message Passing in Graph Neural Networks, we propose Language Message Passing (LMP) that first learns a concise facts graph by iteratively aggregates neighbor entities and transforms them into semantic facts, and then we performs Topological Readout that encodes the graph structure information into multi-level lists of texts to augment LLMs. Our method serves as a brand-new innovative framework that brings a new perspective into KG-enhanced LLMs, and also offers human-level semantic explainability with significant performance improvements over existing methods on all 5 knowledge graph question answering datasets. Code is available at https://github.com/wanjunhong0/LMP.</abstract>
<identifier type="citekey">wan-etal-2025-digest</identifier>
<identifier type="doi">10.18653/v1/2025.acl-long.750</identifier>
<location>
<url>https://aclanthology.org/2025.acl-long.750/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>15426</start>
<end>15442</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Digest the Knowledge: Large Language Models empowered Message Passing for Knowledge Graph Question Answering
%A Wan, Junhong
%A Yu, Tao
%A Jiang, Kunyu
%A Fu, Yao
%A Jiang, Weihao
%A Zhu, Jiang
%Y Che, Wanxiang
%Y Nabende, Joyce
%Y Shutova, Ekaterina
%Y Pilehvar, Mohammad Taher
%S Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-251-0
%F wan-etal-2025-digest
%X Despite their success, large language models (LLMs) suffer from the notorious hallucination issue. To introduce external knowledge stored in knowledge graphs (KGs), existing methods use paths as the medium to represent the graph information sent into LLMs. However, paths contain only limited graph structure information and are unorganized, with redundant, sequentially appearing keywords that are difficult for LLMs to digest. We aim to find a suitable medium that captures the essence of the structural knowledge in KGs. Inspired by Neural Message Passing in Graph Neural Networks, we propose Language Message Passing (LMP), which first learns a concise facts graph by iteratively aggregating neighbor entities and transforming them into semantic facts, and then performs Topological Readout, which encodes the graph structure information into multi-level lists of text to augment LLMs. Our method serves as a brand-new framework that brings a fresh perspective to KG-enhanced LLMs, and offers human-level semantic explainability with significant performance improvements over existing methods on all 5 knowledge graph question answering datasets. Code is available at https://github.com/wanjunhong0/LMP.
%R 10.18653/v1/2025.acl-long.750
%U https://aclanthology.org/2025.acl-long.750/
%U https://doi.org/10.18653/v1/2025.acl-long.750
%P 15426-15442
Markdown (Informal)
[Digest the Knowledge: Large Language Models empowered Message Passing for Knowledge Graph Question Answering](https://aclanthology.org/2025.acl-long.750/) (Wan et al., ACL 2025)
ACL
Junhong Wan, Tao Yu, Kunyu Jiang, Yao Fu, Weihao Jiang, and Jiang Zhu. 2025. Digest the Knowledge: Large Language Models empowered Message Passing for Knowledge Graph Question Answering. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 15426–15442, Vienna, Austria. Association for Computational Linguistics.
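For readers skimming this record, the pipeline the abstract describes (aggregate neighbor entities into semantic facts, then read the facts graph out as multi-level lists of text for the LLM prompt) can be illustrated with a minimal, hypothetical Python sketch. All names, the toy triples, and the per-hop aggregation strategy below are assumptions for illustration only, not the authors' implementation; see https://github.com/wanjunhong0/LMP for the official code.

```python
from collections import defaultdict

# Hypothetical toy KG as (head, relation, tail) triples; not the paper's datasets.
TRIPLES = [
    ("Alan Turing", "field", "Computer Science"),
    ("Alan Turing", "born_in", "London"),
    ("London", "capital_of", "United Kingdom"),
]

def build_adjacency(triples):
    """Index outgoing edges per entity so neighbors can be aggregated."""
    adj = defaultdict(list)
    for head, rel, tail in triples:
        adj[head].append((rel, tail))
    return adj

def aggregate_facts(entity, adj):
    """One message-passing step: verbalize an entity's neighborhood as semantic facts."""
    return [f"{entity} {rel.replace('_', ' ')} {tail}." for rel, tail in adj[entity]]

def topological_readout(seed, adj, hops=2):
    """Encode the facts graph as a multi-level (per-hop) list of texts for an LLM prompt."""
    levels, frontier, seen = [], [seed], {seed}
    for hop in range(hops):
        facts, next_frontier = [], []
        for entity in frontier:
            facts.extend(aggregate_facts(entity, adj))
            for _, tail in adj[entity]:
                if tail not in seen:
                    seen.add(tail)
                    next_frontier.append(tail)
        if facts:
            levels.append(f"Hop {hop + 1}:\n" + "\n".join(f"- {f}" for f in facts))
        frontier = next_frontier
    return "\n".join(levels)

if __name__ == "__main__":
    adj = build_adjacency(TRIPLES)
    # The resulting text block would be prepended to the question in an LLM prompt.
    print(topological_readout("Alan Turing", adj))
```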