@inproceedings{ko-etal-2024-evidence,
  title     = {Evidence-Focused Fact Summarization for Knowledge-Augmented Zero-Shot Question Answering},
  author    = {Ko, Sungho and
               Cho, Hyunjin and
               Chae, Hyungjoo and
               Yeo, Jinyoung and
               Lee, Dongha},
  editor    = {Al-Onaizan, Yaser and
               Bansal, Mohit and
               Chen, Yun-Nung},
  booktitle = {Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing},
  month     = nov,
  year      = {2024},
  address   = {Miami, Florida, USA},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.emnlp-main.594},
  pages     = {10636--10651},
  abstract  = {Recent studies have investigated utilizing Knowledge Graphs (KGs) to enhance Question Answering (QA) performance of Large Language Models (LLMs), yet structured KG verbalization remains challenging. Existing methods, like concatenation or free-form textual conversion of triples, have limitations, including duplicated entities or relations, reduced evidence density, and failure to highlight crucial evidence. To address these issues, we propose EFSum, an Evidence-focused Fact Summarization framework for enhanced QA with knowledge-augmented LLMs. We optimize an LLM as a fact summarizer through distillation and preference alignment. Our extensive experiments show that EFSum improves LLM{'}s zero-shot QA performance with its helpful and faithful summaries, especially when noisy facts are retrieved.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ko-etal-2024-evidence">
<titleInfo>
<title>Evidence-Focused Fact Summarization for Knowledge-Augmented Zero-Shot Question Answering</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sungho</namePart>
<namePart type="family">Ko</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hyunjin</namePart>
<namePart type="family">Cho</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hyungjoo</namePart>
<namePart type="family">Chae</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jinyoung</namePart>
<namePart type="family">Yeo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dongha</namePart>
<namePart type="family">Lee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yaser</namePart>
<namePart type="family">Al-Onaizan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohit</namePart>
<namePart type="family">Bansal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yun-Nung</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Miami, Florida, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Recent studies have investigated utilizing Knowledge Graphs (KGs) to enhance Question Answering (QA) performance of Large Language Models (LLMs), yet structured KG verbalization remains challenging. Existing methods, like concatenation or free-form textual conversion of triples, have limitations, including duplicated entities or relations, reduced evidence density, and failure to highlight crucial evidence. To address these issues, we propose EFSum, an Evidence-focused Fact Summarization framework for enhanced QA with knowledge-augmented LLMs. We optimize an LLM as a fact summarizer through distillation and preference alignment. Our extensive experiments show that EFSum improves LLM’s zero-shot QA performance with its helpful and faithful summaries, especially when noisy facts are retrieved.</abstract>
<identifier type="citekey">ko-etal-2024-evidence</identifier>
<location>
<url>https://aclanthology.org/2024.emnlp-main.594</url>
</location>
<part>
<date>2024-11</date>
<extent unit="page">
<start>10636</start>
<end>10651</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Evidence-Focused Fact Summarization for Knowledge-Augmented Zero-Shot Question Answering
%A Ko, Sungho
%A Cho, Hyunjin
%A Chae, Hyungjoo
%A Yeo, Jinyoung
%A Lee, Dongha
%Y Al-Onaizan, Yaser
%Y Bansal, Mohit
%Y Chen, Yun-Nung
%S Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, Florida, USA
%F ko-etal-2024-evidence
%X Recent studies have investigated utilizing Knowledge Graphs (KGs) to enhance Question Answering (QA) performance of Large Language Models (LLMs), yet structured KG verbalization remains challenging. Existing methods, like concatenation or free-form textual conversion of triples, have limitations, including duplicated entities or relations, reduced evidence density, and failure to highlight crucial evidence. To address these issues, we propose EFSum, an Evidence-focused Fact Summarization framework for enhanced QA with knowledge-augmented LLMs. We optimize an LLM as a fact summarizer through distillation and preference alignment. Our extensive experiments show that EFSum improves LLM’s zero-shot QA performance with its helpful and faithful summaries, especially when noisy facts are retrieved.
%U https://aclanthology.org/2024.emnlp-main.594
%P 10636-10651
Markdown (Informal)
[Evidence-Focused Fact Summarization for Knowledge-Augmented Zero-Shot Question Answering](https://aclanthology.org/2024.emnlp-main.594) (Ko et al., EMNLP 2024)
ACL