@inproceedings{pradeep-etal-2024-convkgyarn,
title = "{C}onv{KGY}arn: Spinning Configurable and Scalable Conversational Knowledge Graph {QA} Datasets with Large Language Models",
author = "Pradeep, Ronak and
Lee, Daniel and
Mousavi, Ali and
Pound, Jeffrey and
Sang, Yisi and
Lin, Jimmy and
Ilyas, Ihab and
Potdar, Saloni and
Arefiyan, Mostafa and
Li, Yunyao",
editor = "Dernoncourt, Franck and
Preo{\c{t}}iuc-Pietro, Daniel and
Shimorina, Anastasia",
booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track",
month = nov,
year = "2024",
address = "Miami, Florida, US",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.emnlp-industry.89",
doi = "10.18653/v1/2024.emnlp-industry.89",
pages = "1176--1206",
abstract = "The rapid evolution of Large Language Models (LLMs) and conversational assistants necessitates dynamic, scalable, and configurable conversational datasets for training and evaluation.These datasets must accommodate diverse user interaction modes, including text and voice, each presenting unique modeling challenges. Knowledge Graphs (KGs), with their structured and evolving nature, offer an ideal foundation for current and precise knowledge.Although human-curated KG-based conversational datasets exist, they struggle to keep pace with the rapidly changing user information needs.We present ConvKGYarn, a scalable method for generating up-to-date and configurable conversational KGQA datasets. Qualitative psychometric analyses demonstrate ConvKGYarn{'}s effectiveness in producing high-quality data comparable to popular conversational KGQA datasets across various metrics.ConvKGYarn excels in adhering to human interaction configurations and operating at a significantly larger scale.We showcase ConvKGYarn{'}s utility by testing LLMs on diverse conversations {---} exploring model behavior on conversational KGQA sets with different configurations grounded in the same KG fact set.Our results highlight the ability of ConvKGYarn to improve KGQA foundations and evaluate parametric knowledge of LLMs, thus offering a robust solution to the constantly evolving landscape of conversational assistants.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="pradeep-etal-2024-convkgyarn">
<titleInfo>
<title>ConvKGYarn: Spinning Configurable and Scalable Conversational Knowledge Graph QA Datasets with Large Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ronak</namePart>
<namePart type="family">Pradeep</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Daniel</namePart>
<namePart type="family">Lee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ali</namePart>
<namePart type="family">Mousavi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jeffrey</namePart>
<namePart type="family">Pound</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yisi</namePart>
<namePart type="family">Sang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jimmy</namePart>
<namePart type="family">Lin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ihab</namePart>
<namePart type="family">Ilyas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Saloni</namePart>
<namePart type="family">Potdar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mostafa</namePart>
<namePart type="family">Arefiyan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yunyao</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track</title>
</titleInfo>
<name type="personal">
<namePart type="given">Franck</namePart>
<namePart type="family">Dernoncourt</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Daniel</namePart>
<namePart type="family">Preoţiuc-Pietro</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anastasia</namePart>
<namePart type="family">Shimorina</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Miami, Florida, US</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The rapid evolution of Large Language Models (LLMs) and conversational assistants necessitates dynamic, scalable, and configurable conversational datasets for training and evaluation. These datasets must accommodate diverse user interaction modes, including text and voice, each presenting unique modeling challenges. Knowledge Graphs (KGs), with their structured and evolving nature, offer an ideal foundation for current and precise knowledge. Although human-curated KG-based conversational datasets exist, they struggle to keep pace with the rapidly changing user information needs. We present ConvKGYarn, a scalable method for generating up-to-date and configurable conversational KGQA datasets. Qualitative psychometric analyses demonstrate ConvKGYarn’s effectiveness in producing high-quality data comparable to popular conversational KGQA datasets across various metrics. ConvKGYarn excels in adhering to human interaction configurations and operating at a significantly larger scale. We showcase ConvKGYarn’s utility by testing LLMs on diverse conversations — exploring model behavior on conversational KGQA sets with different configurations grounded in the same KG fact set. Our results highlight the ability of ConvKGYarn to improve KGQA foundations and evaluate parametric knowledge of LLMs, thus offering a robust solution to the constantly evolving landscape of conversational assistants.</abstract>
<identifier type="citekey">pradeep-etal-2024-convkgyarn</identifier>
<identifier type="doi">10.18653/v1/2024.emnlp-industry.89</identifier>
<location>
<url>https://aclanthology.org/2024.emnlp-industry.89</url>
</location>
<part>
<date>2024-11</date>
<extent unit="page">
<start>1176</start>
<end>1206</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T ConvKGYarn: Spinning Configurable and Scalable Conversational Knowledge Graph QA Datasets with Large Language Models
%A Pradeep, Ronak
%A Lee, Daniel
%A Mousavi, Ali
%A Pound, Jeffrey
%A Sang, Yisi
%A Lin, Jimmy
%A Ilyas, Ihab
%A Potdar, Saloni
%A Arefiyan, Mostafa
%A Li, Yunyao
%Y Dernoncourt, Franck
%Y Preoţiuc-Pietro, Daniel
%Y Shimorina, Anastasia
%S Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, Florida, US
%F pradeep-etal-2024-convkgyarn
%X The rapid evolution of Large Language Models (LLMs) and conversational assistants necessitates dynamic, scalable, and configurable conversational datasets for training and evaluation. These datasets must accommodate diverse user interaction modes, including text and voice, each presenting unique modeling challenges. Knowledge Graphs (KGs), with their structured and evolving nature, offer an ideal foundation for current and precise knowledge. Although human-curated KG-based conversational datasets exist, they struggle to keep pace with the rapidly changing user information needs. We present ConvKGYarn, a scalable method for generating up-to-date and configurable conversational KGQA datasets. Qualitative psychometric analyses demonstrate ConvKGYarn’s effectiveness in producing high-quality data comparable to popular conversational KGQA datasets across various metrics. ConvKGYarn excels in adhering to human interaction configurations and operating at a significantly larger scale. We showcase ConvKGYarn’s utility by testing LLMs on diverse conversations — exploring model behavior on conversational KGQA sets with different configurations grounded in the same KG fact set. Our results highlight the ability of ConvKGYarn to improve KGQA foundations and evaluate parametric knowledge of LLMs, thus offering a robust solution to the constantly evolving landscape of conversational assistants.
%R 10.18653/v1/2024.emnlp-industry.89
%U https://aclanthology.org/2024.emnlp-industry.89
%U https://doi.org/10.18653/v1/2024.emnlp-industry.89
%P 1176-1206
Markdown (Informal)
[ConvKGYarn: Spinning Configurable and Scalable Conversational Knowledge Graph QA Datasets with Large Language Models](https://aclanthology.org/2024.emnlp-industry.89) (Pradeep et al., EMNLP 2024)
ACL
Ronak Pradeep, Daniel Lee, Ali Mousavi, Jeffrey Pound, Yisi Sang, Jimmy Lin, Ihab Ilyas, Saloni Potdar, Mostafa Arefiyan, and Yunyao Li. 2024. ConvKGYarn: Spinning Configurable and Scalable Conversational Knowledge Graph QA Datasets with Large Language Models. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track, pages 1176–1206, Miami, Florida, US. Association for Computational Linguistics.