@inproceedings{bouhandi-etal-2022-graph,
  title     = {Graph Neural Networks for Adapting Off-the-shelf General Domain Language Models to Low-Resource Specialised Domains},
  author    = {Bouhandi, Merieme and Morin, Emmanuel and Hamon, Thierry},
  editor    = {Wu, Lingfei and Liu, Bang and Mihalcea, Rada and Pei, Jian and Zhang, Yue and Li, Yunyao},
  booktitle = {Proceedings of the 2nd Workshop on Deep Learning on Graphs for Natural Language Processing (DLG4NLP 2022)},
  month     = jul,
  year      = {2022},
  address   = {Seattle, Washington},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2022.dlg4nlp-1.5},
  doi       = {10.18653/v1/2022.dlg4nlp-1.5},
  pages     = {36--42},
  abstract  = {Language models encode linguistic proprieties and are used as input for more specific models. Using their word representations as-is for specialised and low-resource domains might be less efficient. Methods of adapting them exist, but these models often overlook global information about how words, terms, and concepts relate to each other in a corpus due to their strong reliance on attention. We consider that global information can influence the results of the downstream tasks, and combination with contextual information is performed using graph convolution networks or GCN built on vocabulary graphs. By outperforming baselines, we show that this architecture is profitable for domain-specific tasks.},
}
<?xml version="1.0" encoding="UTF-8"?>
<!-- MODS v3 (Metadata Object Description Schema) record for the same citation
     as the BibTeX entry above; record ID matches the BibTeX citekey. -->
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="bouhandi-etal-2022-graph">
<!-- Paper title. -->
<titleInfo>
<title>Graph Neural Networks for Adapting Off-the-shelf General Domain Language Models to Low-Resource Specialised Domains</title>
</titleInfo>
<!-- Three authors, given/family parts split per MODS convention. -->
<name type="personal">
<namePart type="given">Merieme</namePart>
<namePart type="family">Bouhandi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Emmanuel</namePart>
<namePart type="family">Morin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Thierry</namePart>
<namePart type="family">Hamon</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<!-- Host item: the DLG4NLP 2022 proceedings volume, with its six editors,
     publisher, and place of publication. -->
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2nd Workshop on Deep Learning on Graphs for Natural Language Processing (DLG4NLP 2022)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Lingfei</namePart>
<namePart type="family">Wu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bang</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rada</namePart>
<namePart type="family">Mihalcea</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jian</namePart>
<namePart type="family">Pei</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yue</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yunyao</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Seattle, Washington</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<!-- Abstract reproduced verbatim from the published paper. -->
<abstract>Language models encode linguistic proprieties and are used as input for more specific models. Using their word representations as-is for specialised and low-resource domains might be less efficient. Methods of adapting them exist, but these models often overlook global information about how words, terms, and concepts relate to each other in a corpus due to their strong reliance on attention. We consider that global information can influence the results of the downstream tasks, and combination with contextual information is performed using graph convolution networks or GCN built on vocabulary graphs. By outperforming baselines, we show that this architecture is profitable for domain-specific tasks.</abstract>
<!-- Identifiers and locator: citekey, DOI (bare form), and Anthology URL. -->
<identifier type="citekey">bouhandi-etal-2022-graph</identifier>
<identifier type="doi">10.18653/v1/2022.dlg4nlp-1.5</identifier>
<location>
<url>https://aclanthology.org/2022.dlg4nlp-1.5</url>
</location>
<!-- Issue date and page range (pp. 36-42) within the host proceedings. -->
<part>
<date>2022-07</date>
<extent unit="page">
<start>36</start>
<end>42</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Graph Neural Networks for Adapting Off-the-shelf General Domain Language Models to Low-Resource Specialised Domains
%A Bouhandi, Merieme
%A Morin, Emmanuel
%A Hamon, Thierry
%Y Wu, Lingfei
%Y Liu, Bang
%Y Mihalcea, Rada
%Y Pei, Jian
%Y Zhang, Yue
%Y Li, Yunyao
%S Proceedings of the 2nd Workshop on Deep Learning on Graphs for Natural Language Processing (DLG4NLP 2022)
%D 2022
%8 July
%I Association for Computational Linguistics
%C Seattle, Washington
%F bouhandi-etal-2022-graph
%X Language models encode linguistic proprieties and are used as input for more specific models. Using their word representations as-is for specialised and low-resource domains might be less efficient. Methods of adapting them exist, but these models often overlook global information about how words, terms, and concepts relate to each other in a corpus due to their strong reliance on attention. We consider that global information can influence the results of the downstream tasks, and combination with contextual information is performed using graph convolution networks or GCN built on vocabulary graphs. By outperforming baselines, we show that this architecture is profitable for domain-specific tasks.
%R 10.18653/v1/2022.dlg4nlp-1.5
%U https://aclanthology.org/2022.dlg4nlp-1.5
%U https://doi.org/10.18653/v1/2022.dlg4nlp-1.5
%P 36-42
Markdown (Informal)
[Graph Neural Networks for Adapting Off-the-shelf General Domain Language Models to Low-Resource Specialised Domains](https://aclanthology.org/2022.dlg4nlp-1.5) (Bouhandi et al., DLG4NLP 2022)
ACL