@inproceedings{martynova-etal-2025-learn,
title = "Learn Together: Joint Multitask Finetuning of Pretrained {KG}-enhanced {LLM} for Downstream Tasks",
author = "Martynova, Anastasia and
Tishin, Vladislav and
Semenova, Natalia",
editor = "Gesese, Genet Asefa and
Sack, Harald and
Paulheim, Heiko and
Mero{\~n}o-Pe{\~n}uela, Albert and
Chen, Lihu",
booktitle = "Proceedings of the Workshop on Generative AI and Knowledge Graphs (GenAIK)",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "International Committee on Computational Linguistics",
url = "https://aclanthology.org/2025.genaik-1.2/",
pages = "13--19",
abstract = "Recent studies have shown that a knowledge graph (KG) can enhance text data by providing structured background knowledge, which can significantly improve the language understanding skills of the LLM. Besides, finetuning of such models shows solid results on commonsense reasoning benchmarks. In this work, we introduce expandable Joint Multitask Finetuning on Pretrained KG-enchanced LLM approach for Question Answering (QA), Machine Reading Comprehension (MRC) and Knowledge Graph Question Answering (KGQA) tasks. Extensive experiments show competitive performance of joint finetuning QA+MRC+KGQA over single task approach with a maximum gain of 30{\%} accuracy."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="martynova-etal-2025-learn">
<titleInfo>
<title>Learn Together: Joint Multitask Finetuning of Pretrained KG-enhanced LLM for Downstream Tasks</title>
</titleInfo>
<name type="personal">
<namePart type="given">Anastasia</namePart>
<namePart type="family">Martynova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vladislav</namePart>
<namePart type="family">Tishin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Natalia</namePart>
<namePart type="family">Semenova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-01</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Workshop on Generative AI and Knowledge Graphs (GenAIK)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Genet</namePart>
<namePart type="given">Asefa</namePart>
<namePart type="family">Gesese</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Harald</namePart>
<namePart type="family">Sack</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Heiko</namePart>
<namePart type="family">Paulheim</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Albert</namePart>
<namePart type="family">Merono-Penuela</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lihu</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>International Committee on Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, UAE</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Recent studies have shown that a knowledge graph (KG) can enhance text data by providing structured background knowledge, which can significantly improve the language understanding skills of an LLM. Moreover, finetuning of such models shows solid results on commonsense reasoning benchmarks. In this work, we introduce an expandable Joint Multitask Finetuning approach on a Pretrained KG-enhanced LLM for Question Answering (QA), Machine Reading Comprehension (MRC), and Knowledge Graph Question Answering (KGQA) tasks. Extensive experiments show competitive performance of joint QA+MRC+KGQA finetuning over the single-task approach, with a maximum gain of 30% accuracy.</abstract>
<identifier type="citekey">martynova-etal-2025-learn</identifier>
<location>
<url>https://aclanthology.org/2025.genaik-1.2/</url>
</location>
<part>
<date>2025-01</date>
<extent unit="page">
<start>13</start>
<end>19</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Learn Together: Joint Multitask Finetuning of Pretrained KG-enhanced LLM for Downstream Tasks
%A Martynova, Anastasia
%A Tishin, Vladislav
%A Semenova, Natalia
%Y Gesese, Genet Asefa
%Y Sack, Harald
%Y Paulheim, Heiko
%Y Meroño-Peñuela, Albert
%Y Chen, Lihu
%S Proceedings of the Workshop on Generative AI and Knowledge Graphs (GenAIK)
%D 2025
%8 January
%I International Committee on Computational Linguistics
%C Abu Dhabi, UAE
%F martynova-etal-2025-learn
%X Recent studies have shown that a knowledge graph (KG) can enhance text data by providing structured background knowledge, which can significantly improve the language understanding skills of an LLM. Moreover, finetuning of such models shows solid results on commonsense reasoning benchmarks. In this work, we introduce an expandable Joint Multitask Finetuning approach on a Pretrained KG-enhanced LLM for Question Answering (QA), Machine Reading Comprehension (MRC), and Knowledge Graph Question Answering (KGQA) tasks. Extensive experiments show competitive performance of joint QA+MRC+KGQA finetuning over the single-task approach, with a maximum gain of 30% accuracy.
%U https://aclanthology.org/2025.genaik-1.2/
%P 13-19
Markdown (Informal)
[Learn Together: Joint Multitask Finetuning of Pretrained KG-enhanced LLM for Downstream Tasks](https://aclanthology.org/2025.genaik-1.2/) (Martynova et al., GenAIK 2025)
ACL
Anastasia Martynova, Vladislav Tishin, and Natalia Semenova. 2025. Learn Together: Joint Multitask Finetuning of Pretrained KG-enhanced LLM for Downstream Tasks. In Proceedings of the Workshop on Generative AI and Knowledge Graphs (GenAIK), pages 13–19, Abu Dhabi, UAE. International Committee on Computational Linguistics.