@inproceedings{zhang-etal-2025-advancing-language,
title = "Advancing Language Models through Instruction Tuning: Recent Progress and Challenges",
author = "Zhang, Zhihan and
Lou, Renze and
Jiao, Fangkai and
Yin, Wenpeng and
Jiang, Meng",
editor = "Pyatkin, Valentina and
Vlachos, Andreas",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing: Tutorial Abstracts",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.emnlp-tutorials.2/",
pages = "4--6",
ISBN = "979-8-89176-336-4",
abstract = "The capability of following instructions is a key dimension for AI systems. Therefore, in NLP, instruction tuning {--} the process of training language models to follow natural language instructions {--} has become a fundamental component of the model development pipeline. This tutorial addresses three critical questions within the field: (1) What are the current focal points in instruction tuning research? (2) What are the best practices in training an instruction-following model? (3) What new challenges have emerged? To answer these questions, the tutorial presents a systematic overview of recent advances in instruction tuning. It covers different stages in model training: supervised fine-tuning, preference optimization, and reinforcement learning. It introduces scalable strategies for building high-quality instruction data, explores approaches for training autonomous AI agents that handle complex real-world tasks, and discusses common criteria for evaluating instruction-following models. The audience will gain a comprehensive understanding of cutting-edge trends in instruction tuning and insights into promising directions for future research."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhang-etal-2025-advancing-language">
<titleInfo>
<title>Advancing Language Models through Instruction Tuning: Recent Progress and Challenges</title>
</titleInfo>
<name type="personal">
<namePart type="given">Zhihan</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Renze</namePart>
<namePart type="family">Lou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Fangkai</namePart>
<namePart type="family">Jiao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wenpeng</namePart>
<namePart type="family">Yin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Meng</namePart>
<namePart type="family">Jiang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing: Tutorial Abstracts</title>
</titleInfo>
<name type="personal">
<namePart type="given">Valentina</namePart>
<namePart type="family">Pyatkin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Andreas</namePart>
<namePart type="family">Vlachos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-336-4</identifier>
</relatedItem>
<abstract>The capability of following instructions is a key dimension for AI systems. Therefore, in NLP, instruction tuning – the process of training language models to follow natural language instructions – has become a fundamental component of the model development pipeline. This tutorial addresses three critical questions within the field: (1) What are the current focal points in instruction tuning research? (2) What are the best practices in training an instruction-following model? (3) What new challenges have emerged? To answer these questions, the tutorial presents a systematic overview of recent advances in instruction tuning. It covers different stages in model training: supervised fine-tuning, preference optimization, and reinforcement learning. It introduces scalable strategies for building high-quality instruction data, explores approaches for training autonomous AI agents that handle complex real-world tasks, and discusses common criteria for evaluating instruction-following models. The audience will gain a comprehensive understanding of cutting-edge trends in instruction tuning and insights into promising directions for future research.</abstract>
<identifier type="citekey">zhang-etal-2025-advancing-language</identifier>
<location>
<url>https://aclanthology.org/2025.emnlp-tutorials.2/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>4</start>
<end>6</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Advancing Language Models through Instruction Tuning: Recent Progress and Challenges
%A Zhang, Zhihan
%A Lou, Renze
%A Jiao, Fangkai
%A Yin, Wenpeng
%A Jiang, Meng
%Y Pyatkin, Valentina
%Y Vlachos, Andreas
%S Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing: Tutorial Abstracts
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-336-4
%F zhang-etal-2025-advancing-language
%X The capability of following instructions is a key dimension for AI systems. Therefore, in NLP, instruction tuning – the process of training language models to follow natural language instructions – has become a fundamental component of the model development pipeline. This tutorial addresses three critical questions within the field: (1) What are the current focal points in instruction tuning research? (2) What are the best practices in training an instruction-following model? (3) What new challenges have emerged? To answer these questions, the tutorial presents a systematic overview of recent advances in instruction tuning. It covers different stages in model training: supervised fine-tuning, preference optimization, and reinforcement learning. It introduces scalable strategies for building high-quality instruction data, explores approaches for training autonomous AI agents that handle complex real-world tasks, and discusses common criteria for evaluating instruction-following models. The audience will gain a comprehensive understanding of cutting-edge trends in instruction tuning and insights into promising directions for future research.
%U https://aclanthology.org/2025.emnlp-tutorials.2/
%P 4-6
Markdown (Informal)
[Advancing Language Models through Instruction Tuning: Recent Progress and Challenges](https://aclanthology.org/2025.emnlp-tutorials.2/) (Zhang et al., EMNLP 2025)
ACL