@inproceedings{nie-etal-2024-know,
  title     = {{Know-Adapter}: Towards Knowledge-Aware Parameter-Efficient Transfer Learning for Few-shot {Named Entity Recognition}},
  author    = {Nie, Binling and
               Shao, Yiming and
               Wang, Yigang},
  editor    = {Calzolari, Nicoletta and
               Kan, Min-Yen and
               Hoste, Veronique and
               Lenci, Alessandro and
               Sakti, Sakriani and
               Xue, Nianwen},
  booktitle = {Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation ({LREC-COLING} 2024)},
  month     = may,
  year      = {2024},
  address   = {Torino, Italia},
  publisher = {ELRA and ICCL},
  url       = {https://aclanthology.org/2024.lrec-main.854},
  pages     = {9777--9786},
  abstract  = {Parameter-Efficient Fine-Tuning (PEFT) is a promising approach to mitigate the challenges about the model adaptation of pretrained language models (PLMs) for the named entity recognition (NER) task. Recent studies have highlighted the improvements that can be made to the quality of information retrieved from PLMs by adding explicit knowledge from external source like KGs to otherwise naive PEFTs. In this paper, we propose a novel knowledgeable adapter, Know-adapter, to incorporate structure and semantic knowledge of knowledge graphs into PLMs for few-shot NER. First, we construct a related KG entity type sequence for each sentence using a knowledge retriever. However, the type system of a domain-specific NER task is typically independent of that of current KGs and thus exhibits heterogeneity issue inevitably, which makes matching between the original NER and KG types (e.g. Person in NER potentially matches President in KBs) less likely, or introduces unintended noises. Thus, then we design a unified taxonomy based on KG ontology for KG entity types and NER labels. This taxonomy is used to build a learnable shared representation module, which provides shared representations for both KG entity type sequences and NER labels. Based on these shared representations, our Know-adapter introduces high semantic relevance knowledge and structure knowledge from KGs as inductive bias to guide the updating process of the adapter. Additionally, the shared representations guide the learnable representation module to reduce noise in the unsupervised expansion of label words. Extensive experiments on multiple NER datasets show the superiority of Know-Adapter over other state-of-the-art methods in both full-resource and low-resource settings.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="nie-etal-2024-know">
<titleInfo>
<title>Know-Adapter: Towards Knowledge-Aware Parameter-Efficient Transfer Learning for Few-shot Named Entity Recognition</title>
</titleInfo>
<name type="personal">
<namePart type="given">Binling</namePart>
<namePart type="family">Nie</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yiming</namePart>
<namePart type="family">Shao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yigang</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Nicoletta</namePart>
<namePart type="family">Calzolari</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Min-Yen</namePart>
<namePart type="family">Kan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Veronique</namePart>
<namePart type="family">Hoste</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alessandro</namePart>
<namePart type="family">Lenci</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sakriani</namePart>
<namePart type="family">Sakti</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nianwen</namePart>
<namePart type="family">Xue</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>ELRA and ICCL</publisher>
<place>
<placeTerm type="text">Torino, Italia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Parameter-Efficient Fine-Tuning (PEFT) is a promising approach to mitigate the challenges about the model adaptation of pretrained language models (PLMs) for the named entity recognition (NER) task. Recent studies have highlighted the improvements that can be made to the quality of information retrieved from PLMs by adding explicit knowledge from external source like KGs to otherwise naive PEFTs. In this paper, we propose a novel knowledgeable adapter, Know-adapter, to incorporate structure and semantic knowledge of knowledge graphs into PLMs for few-shot NER. First, we construct a related KG entity type sequence for each sentence using a knowledge retriever. However, the type system of a domain-specific NER task is typically independent of that of current KGs and thus exhibits heterogeneity issue inevitably, which makes matching between the original NER and KG types (e.g. Person in NER potentially matches President in KBs) less likely, or introduces unintended noises. Thus, then we design a unified taxonomy based on KG ontology for KG entity types and NER labels. This taxonomy is used to build a learnable shared representation module, which provides shared representations for both KG entity type sequences and NER labels. Based on these shared representations, our Know-adapter introduces high semantic relevance knowledge and structure knowledge from KGs as inductive bias to guide the updating process of the adapter. Additionally, the shared representations guide the learnable representation module to reduce noise in the unsupervised expansion of label words. Extensive experiments on multiple NER datasets show the superiority of Know-Adapter over other state-of-the-art methods in both full-resource and low-resource settings.</abstract>
<identifier type="citekey">nie-etal-2024-know</identifier>
<location>
<url>https://aclanthology.org/2024.lrec-main.854</url>
</location>
<part>
<date>2024-05</date>
<extent unit="page">
<start>9777</start>
<end>9786</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Know-Adapter: Towards Knowledge-Aware Parameter-Efficient Transfer Learning for Few-shot Named Entity Recognition
%A Nie, Binling
%A Shao, Yiming
%A Wang, Yigang
%Y Calzolari, Nicoletta
%Y Kan, Min-Yen
%Y Hoste, Veronique
%Y Lenci, Alessandro
%Y Sakti, Sakriani
%Y Xue, Nianwen
%S Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)
%D 2024
%8 May
%I ELRA and ICCL
%C Torino, Italia
%F nie-etal-2024-know
%X Parameter-Efficient Fine-Tuning (PEFT) is a promising approach to mitigate the challenges about the model adaptation of pretrained language models (PLMs) for the named entity recognition (NER) task. Recent studies have highlighted the improvements that can be made to the quality of information retrieved from PLMs by adding explicit knowledge from external source like KGs to otherwise naive PEFTs. In this paper, we propose a novel knowledgeable adapter, Know-adapter, to incorporate structure and semantic knowledge of knowledge graphs into PLMs for few-shot NER. First, we construct a related KG entity type sequence for each sentence using a knowledge retriever. However, the type system of a domain-specific NER task is typically independent of that of current KGs and thus exhibits heterogeneity issue inevitably, which makes matching between the original NER and KG types (e.g. Person in NER potentially matches President in KBs) less likely, or introduces unintended noises. Thus, then we design a unified taxonomy based on KG ontology for KG entity types and NER labels. This taxonomy is used to build a learnable shared representation module, which provides shared representations for both KG entity type sequences and NER labels. Based on these shared representations, our Know-adapter introduces high semantic relevance knowledge and structure knowledge from KGs as inductive bias to guide the updating process of the adapter. Additionally, the shared representations guide the learnable representation module to reduce noise in the unsupervised expansion of label words. Extensive experiments on multiple NER datasets show the superiority of Know-Adapter over other state-of-the-art methods in both full-resource and low-resource settings.
%U https://aclanthology.org/2024.lrec-main.854
%P 9777-9786
Markdown (Informal)
[Know-Adapter: Towards Knowledge-Aware Parameter-Efficient Transfer Learning for Few-shot Named Entity Recognition](https://aclanthology.org/2024.lrec-main.854) (Nie et al., LREC-COLING 2024)
ACL