@inproceedings{chan-etal-2025-prompto,
title = "Prompto: An open source library for asynchronous querying of {LLM} endpoints",
author = "Chan, Ryan Sze-Yin and
Nanni, Federico and
Williams, Angus Redlarski and
Brown, Edwin and
Burke-Moore, Liam and
Chapman, Ed and
Onslow, Kate and
Sippy, Tvesha and
Bright, Jonathan and
Gabasova, Evelina",
editor = "Dziri, Nouha and
Ren, Sean (Xiang) and
Diao, Shizhe",
booktitle = "Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (System Demonstrations)",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.naacl-demo.11/",
doi = "10.18653/v1/2025.naacl-demo.11",
pages = "106--115",
ISBN = "979-8-89176-191-9",
abstract = "Recent surge in Large Language Model (LLM) availability has opened exciting avenues for research. However, efficiently interacting with these models presents a significant hurdle since LLMs often reside on proprietary or self-hosted API endpoints, each requiring custom code for interaction. Conducting comparative studies between different models can therefore be time-consuming and necessitate significant engineering effort, hindering research efficiency and reproducibility. To address these challenges, we present prompto, an open source Python library which facilitates asynchronous querying of LLM endpoints enabling researchers to interact with multiple LLMs concurrently, while maximising efficiency and utilising individual rate limits. Our library empowers researchers and developers to interact with LLMs more effectively and allowing faster experimentation, data generation and evaluation. prompto is released with an introductory video (https://youtu.be/lWN9hXBOLyQ) under MIT License and is available via GitHub (https://github.com/alan-turing-institute/prompto)."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="chan-etal-2025-prompto">
<titleInfo>
<title>Prompto: An open source library for asynchronous querying of LLM endpoints</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ryan</namePart>
<namePart type="given">Sze-Yin</namePart>
<namePart type="family">Chan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Federico</namePart>
<namePart type="family">Nanni</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Angus</namePart>
<namePart type="given">Redlarski</namePart>
<namePart type="family">Williams</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Edwin</namePart>
<namePart type="family">Brown</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Liam</namePart>
<namePart type="family">Burke-Moore</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ed</namePart>
<namePart type="family">Chapman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kate</namePart>
<namePart type="family">Onslow</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tvesha</namePart>
<namePart type="family">Sippy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jonathan</namePart>
<namePart type="family">Bright</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Evelina</namePart>
<namePart type="family">Gabasova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-04</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (System Demonstrations)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Nouha</namePart>
<namePart type="family">Dziri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sean</namePart>
<namePart type="given">(Xiang)</namePart>
<namePart type="family">Ren</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shizhe</namePart>
<namePart type="family">Diao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Albuquerque, New Mexico</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-191-9</identifier>
</relatedItem>
<abstract>The recent surge in Large Language Model (LLM) availability has opened exciting avenues for research. However, efficiently interacting with these models presents a significant hurdle since LLMs often reside on proprietary or self-hosted API endpoints, each requiring custom code for interaction. Conducting comparative studies between different models can therefore be time-consuming and necessitate significant engineering effort, hindering research efficiency and reproducibility. To address these challenges, we present prompto, an open source Python library which facilitates asynchronous querying of LLM endpoints, enabling researchers to interact with multiple LLMs concurrently while maximising efficiency and utilising individual rate limits. Our library empowers researchers and developers to interact with LLMs more effectively, allowing faster experimentation, data generation and evaluation. prompto is released with an introductory video (https://youtu.be/lWN9hXBOLyQ) under the MIT License and is available via GitHub (https://github.com/alan-turing-institute/prompto).</abstract>
<identifier type="citekey">chan-etal-2025-prompto</identifier>
<identifier type="doi">10.18653/v1/2025.naacl-demo.11</identifier>
<location>
<url>https://aclanthology.org/2025.naacl-demo.11/</url>
</location>
<part>
<date>2025-04</date>
<extent unit="page">
<start>106</start>
<end>115</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Prompto: An open source library for asynchronous querying of LLM endpoints
%A Chan, Ryan Sze-Yin
%A Nanni, Federico
%A Williams, Angus Redlarski
%A Brown, Edwin
%A Burke-Moore, Liam
%A Chapman, Ed
%A Onslow, Kate
%A Sippy, Tvesha
%A Bright, Jonathan
%A Gabasova, Evelina
%Y Dziri, Nouha
%Y Ren, Sean (Xiang)
%Y Diao, Shizhe
%S Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (System Demonstrations)
%D 2025
%8 April
%I Association for Computational Linguistics
%C Albuquerque, New Mexico
%@ 979-8-89176-191-9
%F chan-etal-2025-prompto
%X The recent surge in Large Language Model (LLM) availability has opened exciting avenues for research. However, efficiently interacting with these models presents a significant hurdle since LLMs often reside on proprietary or self-hosted API endpoints, each requiring custom code for interaction. Conducting comparative studies between different models can therefore be time-consuming and necessitate significant engineering effort, hindering research efficiency and reproducibility. To address these challenges, we present prompto, an open source Python library which facilitates asynchronous querying of LLM endpoints, enabling researchers to interact with multiple LLMs concurrently while maximising efficiency and utilising individual rate limits. Our library empowers researchers and developers to interact with LLMs more effectively, allowing faster experimentation, data generation and evaluation. prompto is released with an introductory video (https://youtu.be/lWN9hXBOLyQ) under the MIT License and is available via GitHub (https://github.com/alan-turing-institute/prompto).
%R 10.18653/v1/2025.naacl-demo.11
%U https://aclanthology.org/2025.naacl-demo.11/
%U https://doi.org/10.18653/v1/2025.naacl-demo.11
%P 106-115
Markdown (Informal)
[Prompto: An open source library for asynchronous querying of LLM endpoints](https://aclanthology.org/2025.naacl-demo.11/) (Chan et al., NAACL 2025)
ACL
Ryan Sze-Yin Chan, Federico Nanni, Angus Redlarski Williams, Edwin Brown, Liam Burke-Moore, Ed Chapman, Kate Onslow, Tvesha Sippy, Jonathan Bright, and Evelina Gabasova. 2025. Prompto: An open source library for asynchronous querying of LLM endpoints. In Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (System Demonstrations), pages 106–115, Albuquerque, New Mexico. Association for Computational Linguistics.
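
The abstract above describes asynchronous, concurrent querying of multiple LLM endpoints while respecting each endpoint's individual rate limit. The sketch below is a minimal illustration of that general pattern using only Python's asyncio; it is not prompto's actual API, and the endpoint names, the `query` placeholder, and the `max_concurrent` parameter are hypothetical.

```python
# Minimal sketch (not prompto's API): query several endpoints concurrently,
# with a per-endpoint semaphore standing in for an individual rate limit.
import asyncio


async def query(endpoint: str, prompt: str) -> str:
    """Placeholder for a real API call to an LLM endpoint."""
    await asyncio.sleep(0.1)  # simulate network latency
    return f"[{endpoint}] response to: {prompt!r}"


async def query_with_limit(endpoint: str, prompts: list[str], max_concurrent: int) -> list[str]:
    # One semaphore per endpoint approximates that endpoint's own rate limit.
    sem = asyncio.Semaphore(max_concurrent)

    async def limited(prompt: str) -> str:
        async with sem:
            return await query(endpoint, prompt)

    return await asyncio.gather(*(limited(p) for p in prompts))


async def main() -> None:
    prompts = ["What is 2 + 2?", "Name a prime number."]
    # Different endpoints run concurrently, each under its own limit.
    results = await asyncio.gather(
        query_with_limit("endpoint-a", prompts, max_concurrent=2),
        query_with_limit("endpoint-b", prompts, max_concurrent=1),
    )
    for batch in results:
        for line in batch:
            print(line)


if __name__ == "__main__":
    asyncio.run(main())
```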