@inproceedings{rahman-caragea-2025-llm,
title = "{LLM}-Guided Co-Training for Text Classification",
author = "Rahman, Md Mezbaur and
Caragea, Cornelia",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.emnlp-main.1583/",
doi = "10.18653/v1/2025.emnlp-main.1583",
pages = "31104--31121",
ISBN = "979-8-89176-332-6",
abstract = "In this paper, we introduce a novel weighted co-training approach that is guided by Large Language Models (LLMs). Namely, in our co-training approach, we use LLM labels on unlabeled data as target labels and co-train two encoder-only based networks that train each other over multiple iterations: first, all samples are forwarded through each network and historical estimates of each network{'}s confidence in the LLM label are recorded; second, a dynamic importance weight is derived for each sample according to each network{'}s belief (or confidence) in the quality of the LLM label for that sample; finally, the two networks exchange importance weights with each other{---}each network back-propagates all samples weighted with the importance weights coming from its peer network and updates its own parameters. By strategically utilizing LLM-generated guidance, our approach significantly outperforms conventional SSL methods, particularly in settings with abundant unlabeled data. Empirical results show that it achieves state-of-the-art performance on 4 out of 5 benchmark datasets and ranks first among 14 compared methods according to the Friedman test. Our results highlight a new direction in semi-supervised learning{---}where LLMs serve as knowledge amplifiers, enabling backbone co-training models to achieve SOTA performance efficiently."
}

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="rahman-caragea-2025-llm">
<titleInfo>
<title>LLM-Guided Co-Training for Text Classification</title>
</titleInfo>
<name type="personal">
<namePart type="given">Md</namePart>
<namePart type="given">Mezbaur</namePart>
<namePart type="family">Rahman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Cornelia</namePart>
<namePart type="family">Caragea</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Christos</namePart>
<namePart type="family">Christodoulopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tanmoy</namePart>
<namePart type="family">Chakraborty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolyn</namePart>
<namePart type="family">Rose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Violet</namePart>
<namePart type="family">Peng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-332-6</identifier>
</relatedItem>
<abstract>In this paper, we introduce a novel weighted co-training approach that is guided by Large Language Models (LLMs). Namely, in our co-training approach, we use LLM labels on unlabeled data as target labels and co-train two encoder-only based networks that train each other over multiple iterations: first, all samples are forwarded through each network and historical estimates of each network’s confidence in the LLM label are recorded; second, a dynamic importance weight is derived for each sample according to each network’s belief (or confidence) in the quality of the LLM label for that sample; finally, the two networks exchange importance weights with each other—each network back-propagates all samples weighted with the importance weights coming from its peer network and updates its own parameters. By strategically utilizing LLM-generated guidance, our approach significantly outperforms conventional SSL methods, particularly in settings with abundant unlabeled data. Empirical results show that it achieves state-of-the-art performance on 4 out of 5 benchmark datasets and ranks first among 14 compared methods according to the Friedman test. Our results highlight a new direction in semi-supervised learning—where LLMs serve as knowledge amplifiers, enabling backbone co-training models to achieve SOTA performance efficiently.</abstract>
<identifier type="citekey">rahman-caragea-2025-llm</identifier>
<identifier type="doi">10.18653/v1/2025.emnlp-main.1583</identifier>
<location>
<url>https://aclanthology.org/2025.emnlp-main.1583/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>31104</start>
<end>31121</end>
</extent>
</part>
</mods>
</modsCollection>

%0 Conference Proceedings
%T LLM-Guided Co-Training for Text Classification
%A Rahman, Md Mezbaur
%A Caragea, Cornelia
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-332-6
%F rahman-caragea-2025-llm
%X In this paper, we introduce a novel weighted co-training approach that is guided by Large Language Models (LLMs). Namely, in our co-training approach, we use LLM labels on unlabeled data as target labels and co-train two encoder-only based networks that train each other over multiple iterations: first, all samples are forwarded through each network and historical estimates of each network’s confidence in the LLM label are recorded; second, a dynamic importance weight is derived for each sample according to each network’s belief (or confidence) in the quality of the LLM label for that sample; finally, the two networks exchange importance weights with each other—each network back-propagates all samples weighted with the importance weights coming from its peer network and updates its own parameters. By strategically utilizing LLM-generated guidance, our approach significantly outperforms conventional SSL methods, particularly in settings with abundant unlabeled data. Empirical results show that it achieves state-of-the-art performance on 4 out of 5 benchmark datasets and ranks first among 14 compared methods according to the Friedman test. Our results highlight a new direction in semi-supervised learning—where LLMs serve as knowledge amplifiers, enabling backbone co-training models to achieve SOTA performance efficiently.
%R 10.18653/v1/2025.emnlp-main.1583
%U https://aclanthology.org/2025.emnlp-main.1583/
%U https://doi.org/10.18653/v1/2025.emnlp-main.1583
%P 31104-31121

Markdown (Informal)
[LLM-Guided Co-Training for Text Classification](https://aclanthology.org/2025.emnlp-main.1583/) (Rahman & Caragea, EMNLP 2025)

ACL
Md Mezbaur Rahman and Cornelia Caragea. 2025. LLM-Guided Co-Training for Text Classification. In Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing, pages 31104–31121, Suzhou, China. Association for Computational Linguistics.
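
The abstract above outlines a three-step loop: forward all samples through both networks and keep historical estimates of each network's confidence in the LLM label, turn those estimates into per-sample importance weights, then have each network train on all samples weighted by its peer's weights. The sketch below is a minimal, hedged illustration of that idea only, not the paper's implementation: the synthetic features, the small MLPs standing in for the encoder-only networks, the EMA-based confidence tracking, and all hyperparameter names (`EMA_MOMENTUM`, `NUM_ITERATIONS`, etc.) are assumptions introduced here for illustration.

```python
# Illustrative sketch of LLM-guided weighted co-training, based only on the
# abstract. Assumptions: random features stand in for encoder outputs, two
# small MLPs stand in for the encoder-only networks, and an EMA of each
# network's confidence in the LLM label is used directly as the peer's
# per-sample loss weight.
import torch
import torch.nn as nn
import torch.nn.functional as F

torch.manual_seed(0)

NUM_SAMPLES, NUM_FEATURES, NUM_CLASSES = 512, 32, 4
EMA_MOMENTUM = 0.9        # smoothing for the historical confidence estimate
NUM_ITERATIONS = 20

# Unlabeled pool with (possibly noisy) LLM-assigned target labels.
features = torch.randn(NUM_SAMPLES, NUM_FEATURES)
llm_labels = torch.randint(0, NUM_CLASSES, (NUM_SAMPLES,))

def make_net() -> nn.Module:
    return nn.Sequential(nn.Linear(NUM_FEATURES, 64), nn.ReLU(),
                         nn.Linear(64, NUM_CLASSES))

net_a, net_b = make_net(), make_net()
opt_a = torch.optim.Adam(net_a.parameters(), lr=1e-3)
opt_b = torch.optim.Adam(net_b.parameters(), lr=1e-3)

# Historical (EMA) confidence of each network in the LLM label, per sample.
conf_a = torch.full((NUM_SAMPLES,), 1.0 / NUM_CLASSES)
conf_b = torch.full((NUM_SAMPLES,), 1.0 / NUM_CLASSES)

for step in range(NUM_ITERATIONS):
    # Step 1: forward all samples and update historical confidence estimates.
    with torch.no_grad():
        idx = torch.arange(NUM_SAMPLES)
        p_a = F.softmax(net_a(features), dim=1)[idx, llm_labels]
        p_b = F.softmax(net_b(features), dim=1)[idx, llm_labels]
    conf_a = EMA_MOMENTUM * conf_a + (1 - EMA_MOMENTUM) * p_a
    conf_b = EMA_MOMENTUM * conf_b + (1 - EMA_MOMENTUM) * p_b

    # Step 2: derive dynamic importance weights from each network's belief
    # in the LLM label (here simply the EMA confidence itself).
    w_a, w_b = conf_a.detach(), conf_b.detach()

    # Step 3: exchange weights -- each network back-propagates all samples
    # weighted by its *peer's* importance weights.
    loss_a = (F.cross_entropy(net_a(features), llm_labels,
                              reduction="none") * w_b).mean()
    opt_a.zero_grad()
    loss_a.backward()
    opt_a.step()

    loss_b = (F.cross_entropy(net_b(features), llm_labels,
                              reduction="none") * w_a).mean()
    opt_b.zero_grad()
    loss_b.backward()
    opt_b.step()

    if step % 5 == 0:
        print(f"iter {step}: loss_a={loss_a.item():.3f} loss_b={loss_b.item():.3f}")
```

The key design point mirrored here is that each network is weighted by its peer's confidence rather than its own, so one network's skepticism about a noisy LLM label can down-weight that sample in the other network's update; the specific confidence estimator and weighting function in the paper may differ from this sketch.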