@inproceedings{licari-etal-2024-novel,
title = "A Novel Multi-Step Prompt Approach for {LLM}-based {Q}{\&}As on Banking Supervisory Regulation",
author = "Licari, Daniele and
Benedetto, Canio and
Bushipaka, Praveen and
De Gregorio, Alessandro and
De Leonardis, Marco and
Cucinotta, Tommaso",
editor = "Dell'Orletta, Felice and
Lenci, Alessandro and
Montemagni, Simonetta and
Sprugnoli, Rachele",
booktitle = "Proceedings of the 10th Italian Conference on Computational Linguistics (CLiC-it 2024)",
month = dec,
year = "2024",
address = "Pisa, Italy",
publisher = "CEUR Workshop Proceedings",
url = "https://aclanthology.org/2024.clicit-1.59/",
pages = "496--509",
    ISBN = "979-12-210-7060-6",
abstract = "This paper investigates the use of large language models (LLMs) in analyzing and answering questions related to banking supervisory regulation concerning reporting obligations. We introduce a multi-step prompt construction method that enhances the context provided to the LLM, resulting in more precise and informative answers. This multi-step approach is compared with a standard {\textquotedblleft}zero-shot{\textquotedblright} approach, which lacks context enrichment. To assess the quality of the generated responses, we utilize an LLM Evaluator. Our findings indicate that the multi-step approach significantly outperforms the zero-shot method, producing more comprehensive and accurate responses."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="licari-etal-2024-novel">
<titleInfo>
    <title>A Novel Multi-Step Prompt Approach for LLM-based Q&amp;As on Banking Supervisory Regulation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Daniele</namePart>
<namePart type="family">Licari</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Canio</namePart>
<namePart type="family">Benedetto</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Praveen</namePart>
<namePart type="family">Bushipaka</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alessandro</namePart>
<namePart type="family">De Gregorio</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marco</namePart>
<namePart type="family">De Leonardis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tommaso</namePart>
<namePart type="family">Cucinotta</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 10th Italian Conference on Computational Linguistics (CLiC-it 2024)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Felice</namePart>
<namePart type="family">Dell’Orletta</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alessandro</namePart>
<namePart type="family">Lenci</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Simonetta</namePart>
<namePart type="family">Montemagni</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rachele</namePart>
<namePart type="family">Sprugnoli</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>CEUR Workshop Proceedings</publisher>
<place>
<placeTerm type="text">Pisa, Italy</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-12-210-7060-6</identifier>
</relatedItem>
<abstract>This paper investigates the use of large language models (LLMs) in analyzing and answering questions related to banking supervisory regulation concerning reporting obligations. We introduce a multi-step prompt construction method that enhances the context provided to the LLM, resulting in more precise and informative answers. This multi-step approach is compared with a standard “zero-shot” approach, which lacks context enrichment. To assess the quality of the generated responses, we utilize an LLM Evaluator. Our findings indicate that the multi-step approach significantly outperforms the zero-shot method, producing more comprehensive and accurate responses.</abstract>
<identifier type="citekey">licari-etal-2024-novel</identifier>
<location>
<url>https://aclanthology.org/2024.clicit-1.59/</url>
</location>
<part>
<date>2024-12</date>
<extent unit="page">
<start>496</start>
<end>509</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T A Novel Multi-Step Prompt Approach for LLM-based Q&As on Banking Supervisory Regulation
%A Licari, Daniele
%A Benedetto, Canio
%A Bushipaka, Praveen
%A De Gregorio, Alessandro
%A De Leonardis, Marco
%A Cucinotta, Tommaso
%Y Dell’Orletta, Felice
%Y Lenci, Alessandro
%Y Montemagni, Simonetta
%Y Sprugnoli, Rachele
%S Proceedings of the 10th Italian Conference on Computational Linguistics (CLiC-it 2024)
%D 2024
%8 December
%I CEUR Workshop Proceedings
%C Pisa, Italy
%@ 979-12-210-7060-6
%F licari-etal-2024-novel
%X This paper investigates the use of large language models (LLMs) in analyzing and answering questions related to banking supervisory regulation concerning reporting obligations. We introduce a multi-step prompt construction method that enhances the context provided to the LLM, resulting in more precise and informative answers. This multi-step approach is compared with a standard “zero-shot” approach, which lacks context enrichment. To assess the quality of the generated responses, we utilize an LLM Evaluator. Our findings indicate that the multi-step approach significantly outperforms the zero-shot method, producing more comprehensive and accurate responses.
%U https://aclanthology.org/2024.clicit-1.59/
%P 496-509
Markdown (Informal)
[A Novel Multi-Step Prompt Approach for LLM-based Q&As on Banking Supervisory Regulation](https://aclanthology.org/2024.clicit-1.59/) (Licari et al., CLiC-it 2024)
ACL