@inproceedings{zhu-etal-2025-llmlink,
title = "{L}lm{L}ink: Dual {LLM}s for Dynamic Entity Linking on Long Narratives with Collaborative Memorisation and Prompt Optimisation",
author = "Zhu, Lixing and
Wang, Jun and
He, Yulan",
editor = "Rambow, Owen and
Wanner, Leo and
Apidianaki, Marianna and
Al-Khalifa, Hend and
Eugenio, Barbara Di and
Schockaert, Steven",
booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.coling-main.751/",
pages = "11334--11347",
abstract = "We address the task of CoREFerence resolution (CoREF) in chunked long narratives. Existing approaches remain either focused on supervised fine-tuning or limited to one-off prediction, which poses a challenge where the context is long. We develop a dynamic approach to cope with this: by deploying dual Large Language Models (LLMs), we assign specialised LLMs to local named entity recognition and distant CoREF tasks, respectively, while ensuring their exchange of information. Utilising our novel memorisation schemes, the coreference resolution LLM would memorise characters and their associated descriptions, thereby reducing token consumption compared with storing previous messages. To alleviate hallucinations of LLMs, we employ an automatic prompt optimisation method, with the LLM ranker modified to leverage annotations. Our approach achieves performance gains over other LLM-based models and fine-tuning approaches on long narrative datasets, significantly reducing the resources required for inference and training."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="zhu-etal-2025-llmlink">
    <titleInfo>
      <title>LlmLink: Dual LLMs for Dynamic Entity Linking on Long Narratives with Collaborative Memorisation and Prompt Optimisation</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Lixing</namePart>
      <namePart type="family">Zhu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jun</namePart>
      <namePart type="family">Wang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yulan</namePart>
      <namePart type="family">He</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2025-01</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 31st International Conference on Computational Linguistics</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Owen</namePart>
        <namePart type="family">Rambow</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Leo</namePart>
        <namePart type="family">Wanner</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Marianna</namePart>
        <namePart type="family">Apidianaki</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Hend</namePart>
        <namePart type="family">Al-Khalifa</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Barbara</namePart>
        <namePart type="given">Di</namePart>
        <namePart type="family">Eugenio</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Steven</namePart>
        <namePart type="family">Schockaert</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Abu Dhabi, UAE</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We address the task of CoREFerence resolution (CoREF) in chunked long narratives. Existing approaches remain either focused on supervised fine-tuning or limited to one-off prediction, which poses a challenge where the context is long. We develop a dynamic approach to cope with this: by deploying dual Large Language Models (LLMs), we assign specialised LLMs to local named entity recognition and distant CoREF tasks, respectively, while ensuring their exchange of information. Utilising our novel memorisation schemes, the coreference resolution LLM would memorise characters and their associated descriptions, thereby reducing token consumption compared with storing previous messages. To alleviate hallucinations of LLMs, we employ an automatic prompt optimisation method, with the LLM ranker modified to leverage annotations. Our approach achieves performance gains over other LLM-based models and fine-tuning approaches on long narrative datasets, significantly reducing the resources required for inference and training.</abstract>
    <identifier type="citekey">zhu-etal-2025-llmlink</identifier>
    <location>
      <url>https://aclanthology.org/2025.coling-main.751/</url>
    </location>
    <part>
      <date>2025-01</date>
      <extent unit="page">
        <start>11334</start>
        <end>11347</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T LlmLink: Dual LLMs for Dynamic Entity Linking on Long Narratives with Collaborative Memorisation and Prompt Optimisation
%A Zhu, Lixing
%A Wang, Jun
%A He, Yulan
%Y Rambow, Owen
%Y Wanner, Leo
%Y Apidianaki, Marianna
%Y Al-Khalifa, Hend
%Y Eugenio, Barbara Di
%Y Schockaert, Steven
%S Proceedings of the 31st International Conference on Computational Linguistics
%D 2025
%8 January
%I Association for Computational Linguistics
%C Abu Dhabi, UAE
%F zhu-etal-2025-llmlink
%X We address the task of CoREFerence resolution (CoREF) in chunked long narratives. Existing approaches remain either focused on supervised fine-tuning or limited to one-off prediction, which poses a challenge where the context is long. We develop a dynamic approach to cope with this: by deploying dual Large Language Models (LLMs), we assign specialised LLMs to local named entity recognition and distant CoREF tasks, respectively, while ensuring their exchange of information. Utilising our novel memorisation schemes, the coreference resolution LLM would memorise characters and their associated descriptions, thereby reducing token consumption compared with storing previous messages. To alleviate hallucinations of LLMs, we employ an automatic prompt optimisation method, with the LLM ranker modified to leverage annotations. Our approach achieves performance gains over other LLM-based models and fine-tuning approaches on long narrative datasets, significantly reducing the resources required for inference and training.
%U https://aclanthology.org/2025.coling-main.751/
%P 11334-11347
Markdown (Informal)
[LlmLink: Dual LLMs for Dynamic Entity Linking on Long Narratives with Collaborative Memorisation and Prompt Optimisation](https://aclanthology.org/2025.coling-main.751/) (Zhu et al., COLING 2025)
ACL
Lixing Zhu, Jun Wang, and Yulan He. 2025. LlmLink: Dual LLMs for Dynamic Entity Linking on Long Narratives with Collaborative Memorisation and Prompt Optimisation. In Proceedings of the 31st International Conference on Computational Linguistics, pages 11334–11347, Abu Dhabi, UAE. Association for Computational Linguistics.