@inproceedings{penkov-2024-mitigating,
title = "Mitigating Hallucinations in Large Language Models via Semantic Enrichment of Prompts: Insights from {B}io{BERT} and Ontological Integration",
author = "Penkov, Stanislav",
booktitle = "Proceedings of the Sixth International Conference on Computational Linguistics in Bulgaria (CLIB 2024)",
month = sep,
year = "2024",
address = "Sofia, Bulgaria",
publisher = "Department of Computational Linguistics, Institute for Bulgarian Language, Bulgarian Academy of Sciences",
url = "https://aclanthology.org/2024.clib-1.30",
pages = "272--276",
abstract = "The advent of Large Language Models (LLMs) has been transformative for natural language processing, yet their tendency to produce {``}hallucinations{''}{---}outputs that are factually incorrect or entirely fabricated{---} remains a significant hurdle. This paper introduces a proactive methodology for reducing hallucinations by strategically enriching LLM prompts. This involves identifying key entities and contextual cues from varied domains and integrating this information into the LLM prompts to guide the model towards more accurate and relevant responses. Leveraging examples from BioBERT for biomedical entity recognition and ChEBI for chemical ontology, we illustrate a broader approach that encompasses semantic prompt enrichment as a versatile tool for enhancing LLM output accuracy. By examining the potential of semantic and ontological enrichment in diverse contexts, we aim to present a scalable strategy for improving the reliability of AI-generated content, thereby contributing to the ongoing efforts to refine LLMs for a wide range of applications.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="penkov-2024-mitigating">
<titleInfo>
<title>Mitigating Hallucinations in Large Language Models via Semantic Enrichment of Prompts: Insights from BioBERT and Ontological Integration</title>
</titleInfo>
<name type="personal">
<namePart type="given">Stanislav</namePart>
<namePart type="family">Penkov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Sixth International Conference on Computational Linguistics in Bulgaria (CLIB 2024)</title>
</titleInfo>
<originInfo>
<publisher>Department of Computational Linguistics, Institute for Bulgarian Language, Bulgarian Academy of Sciences</publisher>
<place>
<placeTerm type="text">Sofia, Bulgaria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The advent of Large Language Models (LLMs) has been transformative for natural language processing, yet their tendency to produce “hallucinations”—outputs that are factually incorrect or entirely fabricated— remains a significant hurdle. This paper introduces a proactive methodology for reducing hallucinations by strategically enriching LLM prompts. This involves identifying key entities and contextual cues from varied domains and integrating this information into the LLM prompts to guide the model towards more accurate and relevant responses. Leveraging examples from BioBERT for biomedical entity recognition and ChEBI for chemical ontology, we illustrate a broader approach that encompasses semantic prompt enrichment as a versatile tool for enhancing LLM output accuracy. By examining the potential of semantic and ontological enrichment in diverse contexts, we aim to present a scalable strategy for improving the reliability of AI-generated content, thereby contributing to the ongoing efforts to refine LLMs for a wide range of applications.</abstract>
<identifier type="citekey">penkov-2024-mitigating</identifier>
<location>
<url>https://aclanthology.org/2024.clib-1.30</url>
</location>
<part>
<date>2024-09</date>
<extent unit="page">
<start>272</start>
<end>276</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Mitigating Hallucinations in Large Language Models via Semantic Enrichment of Prompts: Insights from BioBERT and Ontological Integration
%A Penkov, Stanislav
%S Proceedings of the Sixth International Conference on Computational Linguistics in Bulgaria (CLIB 2024)
%D 2024
%8 September
%I Department of Computational Linguistics, Institute for Bulgarian Language, Bulgarian Academy of Sciences
%C Sofia, Bulgaria
%F penkov-2024-mitigating
%X The advent of Large Language Models (LLMs) has been transformative for natural language processing, yet their tendency to produce “hallucinations”—outputs that are factually incorrect or entirely fabricated—remains a significant hurdle. This paper introduces a proactive methodology for reducing hallucinations by strategically enriching LLM prompts. This involves identifying key entities and contextual cues from varied domains and integrating this information into the LLM prompts to guide the model towards more accurate and relevant responses. Leveraging examples from BioBERT for biomedical entity recognition and ChEBI for chemical ontology, we illustrate a broader approach that encompasses semantic prompt enrichment as a versatile tool for enhancing LLM output accuracy. By examining the potential of semantic and ontological enrichment in diverse contexts, we aim to present a scalable strategy for improving the reliability of AI-generated content, thereby contributing to the ongoing efforts to refine LLMs for a wide range of applications.
%U https://aclanthology.org/2024.clib-1.30
%P 272-276
Markdown (Informal)
[Mitigating Hallucinations in Large Language Models via Semantic Enrichment of Prompts: Insights from BioBERT and Ontological Integration](https://aclanthology.org/2024.clib-1.30) (Penkov, CLIB 2024)
ACL
Stanislav Penkov. 2024. Mitigating Hallucinations in Large Language Models via Semantic Enrichment of Prompts: Insights from BioBERT and Ontological Integration. In *Proceedings of the Sixth International Conference on Computational Linguistics in Bulgaria (CLIB 2024)*, pages 272–276, Sofia, Bulgaria. Department of Computational Linguistics, Institute for Bulgarian Language, Bulgarian Academy of Sciences.
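
As a rough illustration of the approach the abstract outlines (entity recognition with a BioBERT-style model, ontology lookups against ChEBI, and injection of the retrieved facts into the prompt), here is a minimal Python sketch. The model checkpoint, the hard-coded ChEBI entries, and the `enrich_prompt` helper are illustrative assumptions, not the paper's actual implementation.

```python
# Minimal sketch of semantic prompt enrichment: recognize domain entities,
# attach ontology context for them, and prepend that context to the prompt.
from transformers import pipeline

# Placeholder checkpoint; in practice you would use a BioBERT model
# fine-tuned for biomedical/chemical NER.
ner = pipeline(
    "token-classification",
    model="dmis-lab/biobert-base-cased-v1.1",  # assumed, not from the paper
    aggregation_strategy="simple",
)

# Toy stand-in for a ChEBI lookup; a real system would query the ontology.
CHEBI = {
    "aspirin": "aspirin (CHEBI:15365): acetylsalicylic acid, "
               "a non-steroidal anti-inflammatory agent.",
}

def enrich_prompt(question: str) -> str:
    """Prepend ontology facts for recognized entities to the LLM prompt."""
    spans = ner(question)
    facts = [CHEBI[s["word"].lower()] for s in spans
             if s["word"].lower() in CHEBI]
    if not facts:
        return question  # nothing recognized: leave the prompt unchanged
    return "Context:\n" + "\n".join(facts) + f"\n\nQuestion: {question}"

print(enrich_prompt("What are the known side effects of aspirin?"))
```

Passing the question through unchanged when no entity is recognized keeps the enrichment step non-destructive; a production system would query the ChEBI database (or another domain ontology) instead of a hard-coded table.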