@inproceedings{jerdhaf-etal-2022-evaluating,
title = "Evaluating Pre-Trained Language Models for Focused Terminology Extraction from {S}wedish Medical Records",
author = "Jerdhaf, Oskar and
Santini, Marina and
Lundberg, Peter and
Bjerner, Tomas and
Al-Abasse, Yosef and
Jonsson, Arne and
Vakili, Thomas",
editor = "Costa, Rute and
Carvalho, Sara and
Ani{\'c}, Ana Ostro{\v{s}}ki and
Khan, Anas Fahad",
booktitle = "Proceedings of the Workshop on Terminology in the 21st century: many faces, many places",
month = jun,
year = "2022",
address = "Marseille, France",
publisher = "European Language Resources Association",
url = "https://aclanthology.org/2022.term-1.6",
pages = "30--32",
abstract = "In the experiments briefly presented in this abstract, we compare the performance of a generalist Swedish pre-trained language model with a domain-specific Swedish pre-trained model on the downstream task of focussed terminology extraction of implant terms, which are terms that indicate the presence of implants in the body of patients. The fine-tuning is identical for both models. For the search strategy we rely on KD-Tree that we feed with two different lists of term seeds, one with noise and one without noise. Results shows that the use of a domain-specific pre-trained language model has a positive impact on focussed terminology extraction only when using term seeds without noise.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="jerdhaf-etal-2022-evaluating">
    <titleInfo>
      <title>Evaluating Pre-Trained Language Models for Focused Terminology Extraction from Swedish Medical Records</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Oskar</namePart>
      <namePart type="family">Jerdhaf</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Marina</namePart>
      <namePart type="family">Santini</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Peter</namePart>
      <namePart type="family">Lundberg</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Tomas</namePart>
      <namePart type="family">Bjerner</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yosef</namePart>
      <namePart type="family">Al-Abasse</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Arne</namePart>
      <namePart type="family">Jonsson</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Thomas</namePart>
      <namePart type="family">Vakili</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2022-06</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Workshop on Terminology in the 21st century: many faces, many places</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Rute</namePart>
        <namePart type="family">Costa</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Sara</namePart>
        <namePart type="family">Carvalho</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Ana</namePart>
        <namePart type="given">Ostroški</namePart>
        <namePart type="family">Anić</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Anas</namePart>
        <namePart type="given">Fahad</namePart>
        <namePart type="family">Khan</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>European Language Resources Association</publisher>
        <place>
          <placeTerm type="text">Marseille, France</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>In the experiments briefly presented in this abstract, we compare the performance of a generalist Swedish pre-trained language model with a domain-specific Swedish pre-trained model on the downstream task of focused terminology extraction of implant terms, which are terms that indicate the presence of implants in a patient's body. The fine-tuning is identical for both models. For the search strategy, we rely on a KD-Tree that we feed with two different lists of term seeds: one with noise and one without noise. Results show that the use of a domain-specific pre-trained language model has a positive impact on focused terminology extraction only when using term seeds without noise.</abstract>
    <identifier type="citekey">jerdhaf-etal-2022-evaluating</identifier>
    <location>
      <url>https://aclanthology.org/2022.term-1.6</url>
    </location>
    <part>
      <date>2022-06</date>
      <extent unit="page">
        <start>30</start>
        <end>32</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Evaluating Pre-Trained Language Models for Focused Terminology Extraction from Swedish Medical Records
%A Jerdhaf, Oskar
%A Santini, Marina
%A Lundberg, Peter
%A Bjerner, Tomas
%A Al-Abasse, Yosef
%A Jonsson, Arne
%A Vakili, Thomas
%Y Costa, Rute
%Y Carvalho, Sara
%Y Anić, Ana Ostroški
%Y Khan, Anas Fahad
%S Proceedings of the Workshop on Terminology in the 21st century: many faces, many places
%D 2022
%8 June
%I European Language Resources Association
%C Marseille, France
%F jerdhaf-etal-2022-evaluating
%X In the experiments briefly presented in this abstract, we compare the performance of a generalist Swedish pre-trained language model with a domain-specific Swedish pre-trained model on the downstream task of focused terminology extraction of implant terms, which are terms that indicate the presence of implants in a patient's body. The fine-tuning is identical for both models. For the search strategy, we rely on a KD-Tree that we feed with two different lists of term seeds: one with noise and one without noise. Results show that the use of a domain-specific pre-trained language model has a positive impact on focused terminology extraction only when using term seeds without noise.
%U https://aclanthology.org/2022.term-1.6
%P 30-32
Markdown (Informal)
[Evaluating Pre-Trained Language Models for Focused Terminology Extraction from Swedish Medical Records](https://aclanthology.org/2022.term-1.6) (Jerdhaf et al., TERM 2022)
ACL
Oskar Jerdhaf, Marina Santini, Peter Lundberg, Tomas Bjerner, Yosef Al-Abasse, Arne Jonsson, and Thomas Vakili. 2022. Evaluating Pre-Trained Language Models for Focused Terminology Extraction from Swedish Medical Records. In Proceedings of the Workshop on Terminology in the 21st century: many faces, many places, pages 30–32, Marseille, France. European Language Resources Association.
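
A note on the method: the abstract describes a seed-driven search in which terms are embedded with a pre-trained Swedish language model and candidate implant terms are retrieved as nearest neighbours of seed terms via a KD-Tree. The Python sketch below illustrates that idea under stated assumptions: the model name, the candidate and seed term lists, and the mean-pooling step are illustrative placeholders, not the authors' actual pipeline.

import numpy as np
import torch
from scipy.spatial import KDTree
from transformers import AutoModel, AutoTokenizer

# Assumption: a generalist Swedish BERT stands in for the paper's models;
# the paper compares such a model against a domain-specific clinical one
# under identical fine-tuning.
MODEL_NAME = "KB/bert-base-swedish-cased"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModel.from_pretrained(MODEL_NAME)

def embed(terms):
    """Return one mean-pooled embedding vector per term."""
    vectors = []
    for term in terms:
        inputs = tokenizer(term, return_tensors="pt")
        with torch.no_grad():
            hidden = model(**inputs).last_hidden_state  # (1, seq_len, dim)
        vectors.append(hidden.mean(dim=1).squeeze(0).numpy())
    return np.stack(vectors)

# Hypothetical vocabulary drawn from the records, plus a clean
# (noise-free) seed list of implant-related terms.
candidate_terms = ["pacemaker", "stent", "protes", "huvudvärk", "infusion"]
seed_terms = ["pacemaker", "implantat"]

# Index the candidate embeddings once, then query with each seed embedding
# to retrieve the k closest candidate terms in embedding space.
tree = KDTree(embed(candidate_terms))
distances, indices = tree.query(embed(seed_terms), k=3)
for seed, row in zip(seed_terms, indices):
    print(seed, "->", [candidate_terms[i] for i in row])

The paper's finding, that the domain-specific model helps only with noise-free seeds, would correspond here to the quality of seed_terms: a noisy seed list pulls in neighbours unrelated to implants regardless of the embedding model.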