@inproceedings{zhang-etal-2025-self,
title = "A Self-Denoising Model for Robust Few-Shot Relation Extraction",
author = "Zhang, Liang and
Zhang, Yang and
Lu, Ziyao and
Meng, Fandong and
Zhou, Jie and
Su, Jinsong",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.acl-long.1299/",
doi = "10.18653/v1/2025.acl-long.1299",
pages = "26782--26797",
ISBN = "979-8-89176-251-0",
abstract = "The few-shot relation extraction (FSRE) aims at enhancing the model{'}s generalization to new relations with very few labeled instances (support instances). Most existing studies use prototype networks (ProtoNets) for FSRE and assume that the support set, adapting the model to new relations, only contains accurately labeled instances. However, this assumption is usually unrealistic, as even carefully-annotated datasets often contain mislabeled instances. Thus, it is essential to enhance the robustness of FSRE models to noisy labels in support set, but this issue remains unexplored. In this paper, we first conduct a preliminary study, revealing the high sensitivity of ProtoNets to such noisy labels. Meanwhile, we discover that fully leveraging mislabeled support instances is crucial for enhancing the model{'}s robustness. To do this, we propose a self-denoising model for FSRE, which can automatically correct noisy labels of support instances. Specifically, our model comprises two core components: 1) a label correction module (LCM), used to correct mislabeled support instances based on the distances between them in the embedding space, and 2) a relation classification module (RCM), designed to achieve more robust relation prediction using the corrected labels generated by the LCM. Moreover, we propose a feedback-based training strategy, which focuses on training LCM and RCM to synergistically handle noisy labels in support set. Experimental results on two public datasets show the effectiveness and robustness of our model. Notably, even in scenarios without noisy labels, our model significantly outperforms all competitive baselines."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhang-etal-2025-self">
<titleInfo>
<title>A Self-Denoising Model for Robust Few-Shot Relation Extraction</title>
</titleInfo>
<name type="personal">
<namePart type="given">Liang</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yang</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ziyao</namePart>
<namePart type="family">Lu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Fandong</namePart>
<namePart type="family">Meng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jie</namePart>
<namePart type="family">Zhou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jinsong</namePart>
<namePart type="family">Su</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Wanxiang</namePart>
<namePart type="family">Che</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Joyce</namePart>
<namePart type="family">Nabende</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ekaterina</namePart>
<namePart type="family">Shutova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohammad</namePart>
<namePart type="given">Taher</namePart>
<namePart type="family">Pilehvar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-251-0</identifier>
</relatedItem>
<abstract>The few-shot relation extraction (FSRE) aims at enhancing the model’s generalization to new relations with very few labeled instances (support instances). Most existing studies use prototype networks (ProtoNets) for FSRE and assume that the support set, adapting the model to new relations, only contains accurately labeled instances. However, this assumption is usually unrealistic, as even carefully-annotated datasets often contain mislabeled instances. Thus, it is essential to enhance the robustness of FSRE models to noisy labels in support set, but this issue remains unexplored. In this paper, we first conduct a preliminary study, revealing the high sensitivity of ProtoNets to such noisy labels. Meanwhile, we discover that fully leveraging mislabeled support instances is crucial for enhancing the model’s robustness. To do this, we propose a self-denoising model for FSRE, which can automatically correct noisy labels of support instances. Specifically, our model comprises two core components: 1) a label correction module (LCM), used to correct mislabeled support instances based on the distances between them in the embedding space, and 2) a relation classification module (RCM), designed to achieve more robust relation prediction using the corrected labels generated by the LCM. Moreover, we propose a feedback-based training strategy, which focuses on training LCM and RCM to synergistically handle noisy labels in support set. Experimental results on two public datasets show the effectiveness and robustness of our model. Notably, even in scenarios without noisy labels, our model significantly outperforms all competitive baselines.</abstract>
<identifier type="citekey">zhang-etal-2025-self</identifier>
<identifier type="doi">10.18653/v1/2025.acl-long.1299</identifier>
<location>
<url>https://aclanthology.org/2025.acl-long.1299/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>26782</start>
<end>26797</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T A Self-Denoising Model for Robust Few-Shot Relation Extraction
%A Zhang, Liang
%A Zhang, Yang
%A Lu, Ziyao
%A Meng, Fandong
%A Zhou, Jie
%A Su, Jinsong
%Y Che, Wanxiang
%Y Nabende, Joyce
%Y Shutova, Ekaterina
%Y Pilehvar, Mohammad Taher
%S Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-251-0
%F zhang-etal-2025-self
%X The few-shot relation extraction (FSRE) aims at enhancing the model’s generalization to new relations with very few labeled instances (support instances). Most existing studies use prototype networks (ProtoNets) for FSRE and assume that the support set, adapting the model to new relations, only contains accurately labeled instances. However, this assumption is usually unrealistic, as even carefully-annotated datasets often contain mislabeled instances. Thus, it is essential to enhance the robustness of FSRE models to noisy labels in support set, but this issue remains unexplored. In this paper, we first conduct a preliminary study, revealing the high sensitivity of ProtoNets to such noisy labels. Meanwhile, we discover that fully leveraging mislabeled support instances is crucial for enhancing the model’s robustness. To do this, we propose a self-denoising model for FSRE, which can automatically correct noisy labels of support instances. Specifically, our model comprises two core components: 1) a label correction module (LCM), used to correct mislabeled support instances based on the distances between them in the embedding space, and 2) a relation classification module (RCM), designed to achieve more robust relation prediction using the corrected labels generated by the LCM. Moreover, we propose a feedback-based training strategy, which focuses on training LCM and RCM to synergistically handle noisy labels in support set. Experimental results on two public datasets show the effectiveness and robustness of our model. Notably, even in scenarios without noisy labels, our model significantly outperforms all competitive baselines.
%R 10.18653/v1/2025.acl-long.1299
%U https://aclanthology.org/2025.acl-long.1299/
%U https://doi.org/10.18653/v1/2025.acl-long.1299
%P 26782-26797
Markdown (Informal)
[A Self-Denoising Model for Robust Few-Shot Relation Extraction](https://aclanthology.org/2025.acl-long.1299/) (Zhang et al., ACL 2025)
ACL
Liang Zhang, Yang Zhang, Ziyao Lu, Fandong Meng, Jie Zhou, and Jinsong Su. 2025. A Self-Denoising Model for Robust Few-Shot Relation Extraction. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 26782–26797, Vienna, Austria. Association for Computational Linguistics.
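
For readers skimming the abstract, here is a minimal sketch of the core idea it describes: a prototype network whose support labels are re-estimated from embedding-space distances before query classification. This is an illustration only, not the authors' implementation; the function names, the single correction round, and the toy data are all assumptions.

```python
# Minimal, self-contained NumPy sketch of a ProtoNet episode with a
# distance-based label-correction step, loosely following the abstract's
# LCM/RCM description. Everything here -- names, the one-round correction,
# the toy data -- is an assumption made for illustration.
import numpy as np

def softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

def prototypes(emb, soft_labels):
    # emb: (S, D) support embeddings; soft_labels: (S, N) row-stochastic
    # assignments of the S support instances to the N episode relations.
    w = soft_labels / (soft_labels.sum(axis=0, keepdims=True) + 1e-8)
    return w.T @ emb  # (N, D) class prototypes

def correct_labels(support_emb, noisy_labels, temperature=1.0):
    # "LCM"-style step: re-estimate each support label from its squared
    # distance to prototypes built under the (possibly noisy) current labels.
    protos = prototypes(support_emb, noisy_labels)                      # (N, D)
    d = ((support_emb[:, None, :] - protos[None, :, :]) ** 2).sum(-1)  # (S, N)
    return softmax(-d / temperature)                                    # (S, N)

def classify_queries(query_emb, support_emb, corrected_labels):
    # "RCM"-style step: ordinary ProtoNet classification, but with
    # prototypes rebuilt from the corrected (soft) support labels.
    protos = prototypes(support_emb, corrected_labels)
    d = ((query_emb[:, None, :] - protos[None, :, :]) ** 2).sum(-1)
    return softmax(-d)

# Toy 2-way 5-shot episode with one deliberately mislabeled support instance.
rng = np.random.default_rng(0)
rel_a = rng.normal(scale=0.3, size=(5, 8)) + 1.0
rel_b = rng.normal(scale=0.3, size=(5, 8)) - 1.0
support = np.vstack([rel_a, rel_b])                  # (10, 8)
labels = np.eye(2)[[0] * 5 + [1] * 5]                # one-hot support labels
labels[0] = np.eye(2)[1]                             # flip: instance 0 mislabeled as B
corrected = correct_labels(support, labels)
print(corrected[0])  # probability mass should move back toward relation A
```

In the paper itself, the correction and classification modules are trained jointly with a feedback-based strategy; the sketch above only shows the inference-time shape of the idea.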