@inproceedings{wenhao-etal-2024-tilamb,
title = "{T}i{L}amb:基于增量预训练的藏文大语言模型({T}i{L}amb: A {T}ibetan Large Language Model Based on Incremental Pre-training)",
author = "Zhuang, Wenhao and
Sun, Yuan and
Zhao, Xiaobing",
editor = "Maosong, Sun and
Jiye, Liang and
Xianpei, Han and
Zhiyuan, Liu and
Yulan, He",
booktitle = "Proceedings of the 23rd Chinese National Conference on Computational Linguistics (Volume 1: Main Conference)",
month = jul,
year = "2024",
address = "Taiyuan, China",
publisher = "Chinese Information Processing Society of China",
url = "https://aclanthology.org/2024.ccl-1.19/",
pages = "254--267",
language = "zho",
abstract = "``基于{``}预训练+微调{''}范式的语言模型展现了卓越的性能,随着模型规模和训练数据量的扩增,其解决多种自然语言处理任务的能力得到了显著的提高。当前的大语言模型主要支持英汉等主流语言,这限制了藏语等低资源语言在该领域的研究。针对藏语数据稀缺、现有藏语预训练模型效果不够好、下游任务可扩展性差等问题,本文汇总清洗得到26.43GB藏文数据,以开源的LLaMA2-7B作为基座模型,扩充LLaMA2现有词表,增加了约30,000个藏文tokens,提高其藏文编码效率和对藏文的语义理解能力,通过增量预训练得到藏文大语言模型基座TiLamb。根据多种藏文下游任务分别制作数千到几万条不等的微调数据集,微调后的TiLamb在藏文新闻分类、藏文实体关系分类、藏文机器阅读理解、藏文分词、藏文摘要、藏文问题回答、藏文问题生成共七个下游任务中进行验证,多项指标结果相较传统方法和其他藏文预训练模型有大幅提升。本文将TiLamb和部分资源开放供研究使用,https://github.com/NLP-Learning/TiLamb。''"
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="wenhao-etal-2024-tilamb">
<titleInfo>
<title>TiLamb:基于增量预训练的藏文大语言模型(TiLamb: A Tibetan Large Language Model Based on Incremental Pre-training)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Wenhao</namePart>
<namePart type="family">Zhuang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yuan</namePart>
<namePart type="family">Sun</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xiaobing</namePart>
<namePart type="family">Zhao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<language>
<languageTerm type="text">zho</languageTerm>
</language>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 23rd Chinese National Conference on Computational Linguistics (Volume 1: Main Conference)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sun</namePart>
<namePart type="family">Maosong</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Liang</namePart>
<namePart type="family">Jiye</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Han</namePart>
<namePart type="family">Xianpei</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Liu</namePart>
<namePart type="family">Zhiyuan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">He</namePart>
<namePart type="family">Yulan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Chinese Information Processing Society of China</publisher>
<place>
<placeTerm type="text">Taiyuan, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Language models based on the “pre-training + fine-tuning” paradigm have demonstrated excellent performance, and as model size and training data grow, their ability to solve a wide range of natural language processing tasks improves markedly. Current large language models mainly support mainstream languages such as English and Chinese, which limits research on low-resource languages such as Tibetan in this area. To address the scarcity of Tibetan data, the limited effectiveness of existing Tibetan pre-trained models, and their poor extensibility to downstream tasks, this paper compiles and cleans 26.43 GB of Tibetan text, takes the open-source LLaMA2-7B as the base model, and extends the existing LLaMA2 vocabulary with about 30,000 Tibetan tokens, improving its Tibetan encoding efficiency and semantic understanding of Tibetan; through incremental pre-training we obtain the Tibetan large language model base TiLamb. Fine-tuning datasets ranging from several thousand to tens of thousands of examples are built for a variety of Tibetan downstream tasks. The fine-tuned TiLamb is evaluated on seven downstream tasks: Tibetan news classification, Tibetan entity-relation classification, Tibetan machine reading comprehension, Tibetan word segmentation, Tibetan summarization, Tibetan question answering, and Tibetan question generation, and achieves large improvements on multiple metrics over traditional methods and other Tibetan pre-trained models. TiLamb and part of its resources are released for research use at https://github.com/NLP-Learning/TiLamb.</abstract>
<identifier type="citekey">wenhao-etal-2024-tilamb</identifier>
<location>
<url>https://aclanthology.org/2024.ccl-1.19/</url>
</location>
<part>
<date>2024-07</date>
<extent unit="page">
<start>254</start>
<end>267</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T TiLamb:基于增量预训练的藏文大语言模型(TiLamb: A Tibetan Large Language Model Based on Incremental Pre-training)
%A Zhuang, Wenhao
%A Sun, Yuan
%A Zhao, Xiaobing
%Y Sun, Maosong
%Y Liang, Jiye
%Y Han, Xianpei
%Y Liu, Zhiyuan
%Y He, Yulan
%S Proceedings of the 23rd Chinese National Conference on Computational Linguistics (Volume 1: Main Conference)
%D 2024
%8 July
%I Chinese Information Processing Society of China
%C Taiyuan, China
%G zho
%F wenhao-etal-2024-tilamb
%X Language models based on the “pre-training + fine-tuning” paradigm have demonstrated excellent performance, and as model size and training data grow, their ability to solve a wide range of natural language processing tasks improves markedly. Current large language models mainly support mainstream languages such as English and Chinese, which limits research on low-resource languages such as Tibetan in this area. To address the scarcity of Tibetan data, the limited effectiveness of existing Tibetan pre-trained models, and their poor extensibility to downstream tasks, this paper compiles and cleans 26.43 GB of Tibetan text, takes the open-source LLaMA2-7B as the base model, and extends the existing LLaMA2 vocabulary with about 30,000 Tibetan tokens, improving its Tibetan encoding efficiency and semantic understanding of Tibetan; through incremental pre-training we obtain the Tibetan large language model base TiLamb. Fine-tuning datasets ranging from several thousand to tens of thousands of examples are built for a variety of Tibetan downstream tasks. The fine-tuned TiLamb is evaluated on seven downstream tasks: Tibetan news classification, Tibetan entity-relation classification, Tibetan machine reading comprehension, Tibetan word segmentation, Tibetan summarization, Tibetan question answering, and Tibetan question generation, and achieves large improvements on multiple metrics over traditional methods and other Tibetan pre-trained models. TiLamb and part of its resources are released for research use at https://github.com/NLP-Learning/TiLamb.
%U https://aclanthology.org/2024.ccl-1.19/
%P 254-267
Markdown (Informal)
[TiLamb:基于增量预训练的藏文大语言模型(TiLamb: A Tibetan Large Language Model Based on Incremental Pre-training)](https://aclanthology.org/2024.ccl-1.19/) (Zhuang et al., CCL 2024)
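The abstract describes the core recipe: extend the LLaMA2-7B vocabulary with roughly 30,000 Tibetan tokens, then continue pre-training on 26.43 GB of Tibetan text before task-specific fine-tuning. Below is a minimal sketch of the vocabulary-extension step only, assuming the Hugging Face transformers library and a hypothetical tibetan_tokens.txt file of new subword pieces; it is not the authors' released code (see https://github.com/NLP-Learning/TiLamb for that).

    # Sketch: add Tibetan tokens to the LLaMA2-7B tokenizer and resize embeddings
    # before incremental pre-training. File name and token list are assumptions.
    from transformers import AutoTokenizer, AutoModelForCausalLM

    base = "meta-llama/Llama-2-7b-hf"  # open-source base model named in the abstract
    tokenizer = AutoTokenizer.from_pretrained(base)
    model = AutoModelForCausalLM.from_pretrained(base)

    # tibetan_tokens.txt: hypothetical list of new Tibetan subword pieces,
    # e.g. learned with SentencePiece on the cleaned Tibetan corpus.
    with open("tibetan_tokens.txt", encoding="utf-8") as f:
        new_tokens = [line.strip() for line in f if line.strip()]

    num_added = tokenizer.add_tokens(new_tokens)    # extend the vocabulary
    model.resize_token_embeddings(len(tokenizer))   # grow input/output embeddings to match
    print(f"Added {num_added} Tibetan tokens; vocabulary size is now {len(tokenizer)}.")

    # Incremental pre-training would then continue training this model on the
    # Tibetan corpus with a standard causal language modeling objective.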