@inproceedings{zhangyin-etal-2024-qian,
title = "浅谈大模型时代下的检索增强:发展趋势、挑战与展望(Enhancing Large Language Models with Retrieval-Augmented Techniques: Trends, Challenges, and Prospects)",
author = "Zhangyin, Feng and
Kun, Zhu and
Weitao, Ma and
Lei, Huang and
Bing, Qin and
Ting, Liu and
Xiaocheng, Feng",
editor = "Zhao, Xin",
booktitle = "Proceedings of the 23rd Chinese National Conference on Computational Linguistics (Volume 2: Frontier Forum)",
month = jul,
year = "2024",
address = "Taiyuan, China",
publisher = "Chinese Information Processing Society of China",
url = "https://aclanthology.org/2024.ccl-2.9/",
pages = "151--168",
language = "zho",
abstract = "{\textquotedblleft}大型语言模型(LLM) 在各种自然语言任务上表现出了卓越的性能,但它们很容易受到过时数据和特定领域限制的影响。为了应对这些挑战,研究人员整合不同来源的外部信息来增强大语言模型,具体方法如检索增强等。在本文中,我们综合讨论了检索增强技术的发展趋势,包括检索时机规划、检索技术、以及检索结果的利用。此外,我们介绍了当前可用于检索增强任务的数据集和评价方法,并指出了应用和潜在研究方向。我们希望这项综述能够为社区提供对该研究领域的快速了解和全面概述,以启发未来的研究工作。{\textquotedblright}"
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhangyin-etal-2024-qian">
<titleInfo>
<title>浅谈大模型时代下的检索增强:发展趋势、挑战与展望(Enhancing Large Language Models with Retrieval-Augmented Techniques: Trends, Challenges, and Prospects)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Feng</namePart>
<namePart type="family">Zhangyin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhu</namePart>
<namePart type="family">Kun</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ma</namePart>
<namePart type="family">Weitao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Huang</namePart>
<namePart type="family">Lei</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Qin</namePart>
<namePart type="family">Bing</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Liu</namePart>
<namePart type="family">Ting</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Feng</namePart>
<namePart type="family">Xiaocheng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<language>
<languageTerm type="text">zho</languageTerm>
</language>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 23rd Chinese National Conference on Computational Linguistics (Volume 2: Frontier Forum)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Xin</namePart>
<namePart type="family">Zhao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Chinese Information Processing Society of China</publisher>
<place>
<placeTerm type="text">Taiyuan, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Large language models (LLMs) have demonstrated remarkable performance on a wide range of natural language tasks, but they are vulnerable to outdated data and domain-specific limitations. To address these challenges, researchers have augmented LLMs with external information from diverse sources, most notably through retrieval augmentation. In this paper, we discuss the development trends of retrieval-augmentation techniques, including retrieval timing planning, retrieval techniques, and the use of retrieved results. We also introduce the datasets and evaluation methods currently available for retrieval-augmented tasks, and point out applications and potential research directions. We hope this survey offers the community a quick introduction to and a comprehensive overview of this research area, and inspires future work.</abstract>
<identifier type="citekey">zhangyin-etal-2024-qian</identifier>
<location>
<url>https://aclanthology.org/2024.ccl-2.9/</url>
</location>
<part>
<date>2024-07</date>
<extent unit="page">
<start>151</start>
<end>168</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T 浅谈大模型时代下的检索增强:发展趋势、挑战与展望(Enhancing Large Language Models with Retrieval-Augmented Techniques: Trends, Challenges, and Prospects)
%A Feng, Zhangyin
%A Zhu, Kun
%A Ma, Weitao
%A Huang, Lei
%A Qin, Bing
%A Liu, Ting
%A Feng, Xiaocheng
%Y Zhao, Xin
%S Proceedings of the 23rd Chinese National Conference on Computational Linguistics (Volume 2: Frontier Forum)
%D 2024
%8 July
%I Chinese Information Processing Society of China
%C Taiyuan, China
%G zho
%F zhangyin-etal-2024-qian
%X Large language models (LLMs) have demonstrated remarkable performance on a wide range of natural language tasks, but they are vulnerable to outdated data and domain-specific limitations. To address these challenges, researchers have augmented LLMs with external information from diverse sources, most notably through retrieval augmentation. In this paper, we discuss the development trends of retrieval-augmentation techniques, including retrieval timing planning, retrieval techniques, and the use of retrieved results. We also introduce the datasets and evaluation methods currently available for retrieval-augmented tasks, and point out applications and potential research directions. We hope this survey offers the community a quick introduction to and a comprehensive overview of this research area, and inspires future work.
%U https://aclanthology.org/2024.ccl-2.9/
%P 151-168
Markdown (Informal)
[浅谈大模型时代下的检索增强:发展趋势、挑战与展望(Enhancing Large Language Models with Retrieval-Augmented Techniques: Trends, Challenges, and Prospects)](https://aclanthology.org/2024.ccl-2.9/) (Feng et al., CCL 2024)
ACL
- Zhangyin Feng, Kun Zhu, Weitao Ma, Lei Huang, Bing Qin, Ting Liu, and Xiaocheng Feng. 2024. 浅谈大模型时代下的检索增强:发展趋势、挑战与展望(Enhancing Large Language Models with Retrieval-Augmented Techniques: Trends, Challenges, and Prospects). In Proceedings of the 23rd Chinese National Conference on Computational Linguistics (Volume 2: Frontier Forum), pages 151–168, Taiyuan, China. Chinese Information Processing Society of China.