@inproceedings{zejun-etal-2024-cong,
title = "从多模态预训练到多模态大模型:架构、训练、评测、趋势概览(From Multi-Modal Pre-Training to Multi-Modal Large Language Models: An Overview of Architectures, Training,)",
author = "Li, Zejun and
Zhang, Jiwen and
Wang, Ye and
Du, Mengfei and
Liu, Qingwen and
Wang, Dianyi and
Wu, Binhao and
Luo, Ruipu and
Huang, Xuanjing and
Wei, Zhongyu",
editor = "Xin, Zhao",
booktitle = "Proceedings of the 23rd Chinese National Conference on Computational Linguistics (Volume 2: Frontier Forum)",
month = jul,
year = "2024",
address = "Taiyuan, China",
publisher = "Chinese Information Processing Society of China",
url = "https://aclanthology.org/2024.ccl-2.1/",
pages = "1--33",
language = "zho",
abstract = "``多媒体信息在人类社会的发展历程中有着至关重要的作用,构建具有多模态信息处理能力的智能系统也是通往通用人工智能的必经之路。随着预训练技术的发展以及对于通用模型的需求,多模态的研究也从早期的任务特定的方法转移到了构建统一泛用的多模态基座模型上。初步的统一多模态模型探索受到BERT启发,从表征学习的角度出发构建能为不同下游任务提供有效初始化的多模态预训练模型,这类方法尽管有效但仍然在泛用性方面受限于预训练中微调范式,无法更广泛高效地应用。近年来随着大语言模型的发展,以大语言模型为基座的多模态大模型则展现出了巨大的潜力:此类模型有着强大的信息感知,交互,以及推理能力并且能有效泛化到多样的场景下,为新时代的通用人工智能系统提供了切实可行的思路。本文将从构建统一多模态模型的角度出发,介绍和梳理相关工作的发展,从多模态预训练到多模态大模型,介绍对应的架构,训练,评测方法以及发展趋势,为读者提供一个全面的概览。''"
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zejun-etal-2024-cong">
<titleInfo>
<title>从多模态预训练到多模态大模型:架构、训练、评测、趋势概览(From Multi-Modal Pre-Training to Multi-Modal Large Language Models: An Overview of Architectures, Training,)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Zejun</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jiwen</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ye</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mengfei</namePart>
<namePart type="family">Du</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Qingwen</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dianyi</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Binhao</namePart>
<namePart type="family">Wu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ruipu</namePart>
<namePart type="family">Luo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xuanjing</namePart>
<namePart type="family">Huang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhongyu</namePart>
<namePart type="family">Wei</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<language>
<languageTerm type="text">zho</languageTerm>
</language>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 23rd Chinese National Conference on Computational Linguistics (Volume 2: Frontier Forum)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Zhao</namePart>
<namePart type="family">Xin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Chinese Information Processing Society of China</publisher>
<place>
<placeTerm type="text">Taiyuan, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>“多媒体信息在人类社会的发展历程中有着至关重要的作用,构建具有多模态信息处理能力的智能系统也是通往通用人工智能的必经之路。随着预训练技术的发展以及对于通用模型的需求,多模态的研究也从早期的任务特定的方法转移到了构建统一泛用的多模态基座模型上。初步的统一多模态模型探索受到BERT启发,从表征学习的角度出发构建能为不同下游任务提供有效初始化的多模态预训练模型,这类方法尽管有效但仍然在泛用性方面受限于预训练中微调范式,无法更广泛高效地应用。近年来随着大语言模型的发展,以大语言模型为基座的多模态大模型则展现出了巨大的潜力:此类模型有着强大的信息感知,交互,以及推理能力并且能有效泛化到多样的场景下,为新时代的通用人工智能系统提供了切实可行的思路。本文将从构建统一多模态模型的角度出发,介绍和梳理相关工作的发展,从多模态预训练到多模态大模型,介绍对应的架构,训练,评测方法以及发展趋势,为读者提供一个全面的概览。”</abstract>
<identifier type="citekey">zejun-etal-2024-cong</identifier>
<location>
<url>https://aclanthology.org/2024.ccl-2.1/</url>
</location>
<part>
<date>2024-07</date>
<extent unit="page">
<start>1</start>
<end>33</end>
</extent>
</part>
</mods>
</modsCollection>
Zejun Li, Jiwen Zhang, Ye Wang, Mengfei Du, Qingwen Liu, Dianyi Wang, Binhao Wu, Ruipu Luo, Xuanjing Huang, and Zhongyu Wei. 2024. 从多模态预训练到多模态大模型:架构、训练、评测、趋势概览(From Multi-Modal Pre-Training to Multi-Modal Large Language Models: An Overview of Architectures, Training, Evaluation, and Trends). In Proceedings of the 23rd Chinese National Conference on Computational Linguistics (Volume 2: Frontier Forum), pages 1–33, Taiyuan, China. Chinese Information Processing Society of China.