@inproceedings{wan-etal-2023-relation,
    title = "Relation Extraction with Weighted Contrastive Pre-training on Distant Supervision",
    author = "Wan, Zhen and
      Cheng, Fei and
      Liu, Qianying and
      Mao, Zhuoyuan and
      Song, Haiyue and
      Kurohashi, Sadao",
    booktitle = "Findings of the Association for Computational Linguistics: EACL 2023",
    month = may,
    year = "2023",
    address = "Dubrovnik, Croatia",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.findings-eacl.195",
    doi = "10.18653/v1/2023.findings-eacl.195",
    pages = "2580--2585",
    abstract = "Contrastive pre-training on distant supervision has shown remarkable effectiveness in improving supervised relation extraction tasks. However, the existing methods ignore the intrinsic noise of distant supervision during the pre-training stage. In this paper, we propose a weighted contrastive learning method by leveraging the supervised data to estimate the reliability of pre-training instances and explicitly reduce the effect of noise. Experimental results on three supervised datasets demonstrate the advantages of our proposed weighted contrastive learning approach compared to two state-of-the-art non-weighted baselines. Our code and models are available at: \url{https://github.com/YukinoWan/WCL}.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="wan-etal-2023-relation">
    <titleInfo>
        <title>Relation Extraction with Weighted Contrastive Pre-training on Distant Supervision</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Zhen</namePart>
        <namePart type="family">Wan</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Fei</namePart>
        <namePart type="family">Cheng</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Qianying</namePart>
        <namePart type="family">Liu</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Zhuoyuan</namePart>
        <namePart type="family">Mao</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Haiyue</namePart>
        <namePart type="family">Song</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Sadao</namePart>
        <namePart type="family">Kurohashi</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2023-05</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Findings of the Association for Computational Linguistics: EACL 2023</title>
        </titleInfo>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Dubrovnik, Croatia</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Contrastive pre-training on distant supervision has shown remarkable effectiveness in improving supervised relation extraction tasks. However, the existing methods ignore the intrinsic noise of distant supervision during the pre-training stage. In this paper, we propose a weighted contrastive learning method by leveraging the supervised data to estimate the reliability of pre-training instances and explicitly reduce the effect of noise. Experimental results on three supervised datasets demonstrate the advantages of our proposed weighted contrastive learning approach compared to two state-of-the-art non-weighted baselines. Our code and models are available at: https://github.com/YukinoWan/WCL.</abstract>
    <identifier type="citekey">wan-etal-2023-relation</identifier>
    <identifier type="doi">10.18653/v1/2023.findings-eacl.195</identifier>
    <location>
        <url>https://aclanthology.org/2023.findings-eacl.195</url>
    </location>
    <part>
        <date>2023-05</date>
        <extent unit="page">
            <start>2580</start>
            <end>2585</end>
        </extent>
    </part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Relation Extraction with Weighted Contrastive Pre-training on Distant Supervision
%A Wan, Zhen
%A Cheng, Fei
%A Liu, Qianying
%A Mao, Zhuoyuan
%A Song, Haiyue
%A Kurohashi, Sadao
%S Findings of the Association for Computational Linguistics: EACL 2023
%D 2023
%8 May
%I Association for Computational Linguistics
%C Dubrovnik, Croatia
%F wan-etal-2023-relation
%X Contrastive pre-training on distant supervision has shown remarkable effectiveness in improving supervised relation extraction tasks. However, the existing methods ignore the intrinsic noise of distant supervision during the pre-training stage. In this paper, we propose a weighted contrastive learning method by leveraging the supervised data to estimate the reliability of pre-training instances and explicitly reduce the effect of noise. Experimental results on three supervised datasets demonstrate the advantages of our proposed weighted contrastive learning approach compared to two state-of-the-art non-weighted baselines. Our code and models are available at: https://github.com/YukinoWan/WCL.
%R 10.18653/v1/2023.findings-eacl.195
%U https://aclanthology.org/2023.findings-eacl.195
%U https://doi.org/10.18653/v1/2023.findings-eacl.195
%P 2580-2585
Markdown (Informal)
[Relation Extraction with Weighted Contrastive Pre-training on Distant Supervision](https://aclanthology.org/2023.findings-eacl.195) (Wan et al., Findings 2023)
ACL
Zhen Wan, Fei Cheng, Qianying Liu, Zhuoyuan Mao, Haiyue Song, and Sadao Kurohashi. 2023. Relation Extraction with Weighted Contrastive Pre-training on Distant Supervision. In Findings of the Association for Computational Linguistics: EACL 2023, pages 2580–2585, Dubrovnik, Croatia. Association for Computational Linguistics.
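
The abstract above describes the method only at a high level: an InfoNCE-style contrastive objective over distantly supervised instance pairs, with per-instance reliability weights (estimated with help from the supervised data) that explicitly down-weight noisy pairs. Below is a minimal Python/PyTorch sketch of what such a weighted contrastive loss can look like; it is not the authors' released implementation (see https://github.com/YukinoWan/WCL for that), and the in-batch-negative setup, the temperature value, the function name, and the weight normalization are all assumptions of this sketch.

    # Illustrative sketch of a weighted contrastive (InfoNCE-style) loss,
    # NOT the code from the paper. Each distantly supervised instance i
    # carries a reliability weight w_i in [0, 1] that scales its
    # contribution, so noisy pairs contribute less to the gradient.
    import torch
    import torch.nn.functional as F

    def weighted_contrastive_loss(anchors, positives, weights, temperature=0.05):
        """anchors, positives: (N, d) embeddings; rows at the same index
        share a distantly supervised relation label. weights: (N,)
        reliability scores, e.g. from a classifier trained on the
        supervised data (an assumption of this sketch)."""
        # Normalize so the dot product is cosine similarity.
        a = F.normalize(anchors, dim=-1)
        p = F.normalize(positives, dim=-1)
        # In-batch negatives: for row i, column i is the positive and
        # all other columns act as negatives.
        logits = a @ p.t() / temperature
        targets = torch.arange(a.size(0), device=a.device)
        # Per-instance InfoNCE loss, then a reliability-weighted average.
        per_instance = F.cross_entropy(logits, targets, reduction="none")
        return (weights * per_instance).sum() / weights.sum().clamp_min(1e-8)

With uniform weights this reduces to the ordinary non-weighted contrastive pre-training objective used by the baselines the abstract mentions, which is what makes the weighting scheme the distinguishing component.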