@inproceedings{qian-etal-2024-ape,
title = "{APE}: Active Learning-based Tooling for Finding Informative Few-shot Examples for {LLM}-based Entity Matching",
author = "Qian, Kun and
Sang, Yisi and
Bayat{\dag}, Farima and
Belyi, Anton and
Chu, Xianqi and
Govind, Yash and
Khorshidi, Samira and
Khot, Rahul and
Luna, Katherine and
Nikfarjam, Azadeh and
Qi, Xiaoguang and
Wu, Fei and
Zhang, Xianhan and
Li, Yunyao",
editor = "Dragut, Eduard and
Li, Yunyao and
Popa, Lucian and
Vucetic, Slobodan and
Srivastava, Shashank",
booktitle = "Proceedings of the Fifth Workshop on Data Science with Human-in-the-Loop (DaSH 2024)",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.dash-1.1",
doi = "10.18653/v1/2024.dash-1.1",
pages = "1--3",
abstract = "Prompt engineering is an iterative procedure that often requires extensive manual effort to formulate suitable instructions for effectively directing large language models (LLMs) in specific tasks. Incorporating few-shot examples is a vital and effective approach to provide LLMs with precise instructions, leading to improved LLM performance. Nonetheless, identifying the most informative demonstrations for LLMs is labor-intensive, frequently entailing sifting through an extensive search space. In this demonstration, we showcase a human-in-the-loop tool called ool (Active Prompt Engineering) designed for refining prompts through active learning. Drawing inspiration from active learning, ool iteratively selects the most ambiguous examples for human feedback, which will be transformed into few-shot examples within the prompt.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="qian-etal-2024-ape">
<titleInfo>
<title>APE: Active Learning-based Tooling for Finding Informative Few-shot Examples for LLM-based Entity Matching</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kun</namePart>
<namePart type="family">Qian</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yisi</namePart>
<namePart type="family">Sang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Farima</namePart>
<namePart type="family">Bayat\dag</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anton</namePart>
<namePart type="family">Belyi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xianqi</namePart>
<namePart type="family">Chu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yash</namePart>
<namePart type="family">Govind</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Samira</namePart>
<namePart type="family">Khorshidi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rahul</namePart>
<namePart type="family">Khot</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Katherine</namePart>
<namePart type="family">Luna</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Azadeh</namePart>
<namePart type="family">Nikfarjam</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xiaoguang</namePart>
<namePart type="family">Qi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Fei</namePart>
<namePart type="family">Wu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xianhan</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yunyao</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Fifth Workshop on Data Science with Human-in-the-Loop (DaSH 2024)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Eduard</namePart>
<namePart type="family">Dragut</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yunyao</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lucian</namePart>
<namePart type="family">Popa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Slobodan</namePart>
<namePart type="family">Vucetic</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shashank</namePart>
<namePart type="family">Srivastava</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Mexico City, Mexico</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Prompt engineering is an iterative procedure that often requires extensive manual effort to formulate suitable instructions for effectively directing large language models (LLMs) in specific tasks. Incorporating few-shot examples is a vital and effective approach to provide LLMs with precise instructions, leading to improved LLM performance. Nonetheless, identifying the most informative demonstrations for LLMs is labor-intensive, frequently entailing sifting through an extensive search space. In this demonstration, we showcase a human-in-the-loop tool called APE (Active Prompt Engineering) designed for refining prompts through active learning. Drawing inspiration from active learning, APE iteratively selects the most ambiguous examples for human feedback, which will be transformed into few-shot examples within the prompt.</abstract>
<identifier type="citekey">qian-etal-2024-ape</identifier>
<identifier type="doi">10.18653/v1/2024.dash-1.1</identifier>
<location>
<url>https://aclanthology.org/2024.dash-1.1</url>
</location>
<part>
<date>2024-06</date>
<extent unit="page">
<start>1</start>
<end>3</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T APE: Active Learning-based Tooling for Finding Informative Few-shot Examples for LLM-based Entity Matching
%A Qian, Kun
%A Sang, Yisi
%A Bayat†, Farima
%A Belyi, Anton
%A Chu, Xianqi
%A Govind, Yash
%A Khorshidi, Samira
%A Khot, Rahul
%A Luna, Katherine
%A Nikfarjam, Azadeh
%A Qi, Xiaoguang
%A Wu, Fei
%A Zhang, Xianhan
%A Li, Yunyao
%Y Dragut, Eduard
%Y Li, Yunyao
%Y Popa, Lucian
%Y Vucetic, Slobodan
%Y Srivastava, Shashank
%S Proceedings of the Fifth Workshop on Data Science with Human-in-the-Loop (DaSH 2024)
%D 2024
%8 June
%I Association for Computational Linguistics
%C Mexico City, Mexico
%F qian-etal-2024-ape
%X Prompt engineering is an iterative procedure that often requires extensive manual effort to formulate suitable instructions for effectively directing large language models (LLMs) in specific tasks. Incorporating few-shot examples is a vital and effective approach to provide LLMs with precise instructions, leading to improved LLM performance. Nonetheless, identifying the most informative demonstrations for LLMs is labor-intensive, frequently entailing sifting through an extensive search space. In this demonstration, we showcase a human-in-the-loop tool called APE (Active Prompt Engineering) designed for refining prompts through active learning. Drawing inspiration from active learning, APE iteratively selects the most ambiguous examples for human feedback, which will be transformed into few-shot examples within the prompt.
%R 10.18653/v1/2024.dash-1.1
%U https://aclanthology.org/2024.dash-1.1
%U https://doi.org/10.18653/v1/2024.dash-1.1
%P 1-3
Markdown (Informal)
[APE: Active Learning-based Tooling for Finding Informative Few-shot Examples for LLM-based Entity Matching](https://aclanthology.org/2024.dash-1.1) (Qian et al., DaSH-WS 2024)
ACL
- Kun Qian, Yisi Sang, Farima Bayat†, Anton Belyi, Xianqi Chu, Yash Govind, Samira Khorshidi, Rahul Khot, Katherine Luna, Azadeh Nikfarjam, Xiaoguang Qi, Fei Wu, Xianhan Zhang, and Yunyao Li. 2024. APE: Active Learning-based Tooling for Finding Informative Few-shot Examples for LLM-based Entity Matching. In Proceedings of the Fifth Workshop on Data Science with Human-in-the-Loop (DaSH 2024), pages 1–3, Mexico City, Mexico. Association for Computational Linguistics.
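The abstract describes APE's core loop: score unlabeled candidate pairs by the LLM's uncertainty under the current prompt, ask a human to label the most ambiguous ones, and fold the labeled pairs back into the prompt as few-shot examples. Below is a minimal, hypothetical Python sketch of that kind of active-learning loop; the helper names (`llm_match_probability`, `ask_human`, `build_prompt`), the ambiguity heuristic, and the data shapes are illustrative assumptions, not APE's actual implementation.

```python
# Hypothetical sketch of an active-learning loop for selecting few-shot examples.
# All helpers below are stubs/assumptions for illustration, not APE's code.
from typing import List, Tuple

Pair = Tuple[str, str]           # two records to compare for entity matching
Example = Tuple[Pair, bool]      # a labeled pair used as a few-shot demonstration

def llm_match_probability(prompt: str, pair: Pair) -> float:
    """Stub: probability that the two records match, as judged by an LLM.
    A real system would call an LLM with the current prompt here."""
    return 0.5  # placeholder

def ask_human(pair: Pair) -> bool:
    """Stub: a human annotator's match / no-match judgment."""
    return True  # placeholder

def build_prompt(instructions: str, few_shots: List[Example]) -> str:
    """Render the task instructions plus the accumulated few-shot examples."""
    lines = [instructions]
    for (a, b), label in few_shots:
        lines.append(f"Record A: {a}\nRecord B: {b}\nMatch: {'yes' if label else 'no'}")
    return "\n\n".join(lines)

def active_prompt_loop(instructions: str, pool: List[Pair], rounds: int = 3, k: int = 2) -> str:
    """Iteratively label the most ambiguous pairs and add them to the prompt."""
    few_shots: List[Example] = []
    remaining = list(pool)
    for _ in range(rounds):
        prompt = build_prompt(instructions, few_shots)
        # Ambiguity heuristic: probabilities closest to 0.5 are the most uncertain.
        remaining.sort(key=lambda p: abs(llm_match_probability(prompt, p) - 0.5))
        for pair in remaining[:k]:
            few_shots.append((pair, ask_human(pair)))
        remaining = remaining[k:]
    return build_prompt(instructions, few_shots)
```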