@inproceedings{marinova-2019-evaluation,
    title     = {Evaluation of Stacked Embeddings for {Bulgarian} on the Downstream Tasks {POS} and {NERC}},
    author    = {Marinova, Iva},
    editor    = {Kovatchev, Venelin and
                 Temnikova, Irina and
                 {\v{S}}andrih, Branislava and
                 Nikolova, Ivelina},
    booktitle = {Proceedings of the Student Research Workshop Associated with RANLP 2019},
    month     = sep,
    year      = {2019},
    address   = {Varna, Bulgaria},
    publisher = {INCOMA Ltd.},
    url       = {https://aclanthology.org/R19-2008},
    doi       = {10.26615/issn.2603-2821.2019_008},
    pages     = {48--54},
    abstract  = {This paper reports on experiments with different stacks of word embeddings and evaluation of their usefulness for Bulgarian downstream tasks such as Named Entity Recognition and Classification (NERC) and Part-of-speech (POS) Tagging. Word embeddings stay in the core of the development of NLP, with several key language models being created over the last two years like FastText (CITATION), ElMo (CITATION), BERT (CITATION) and Flair (CITATION). Stacking or combining different word embeddings is another technique used in this paper and still not reported for Bulgarian NERC. Well-established architecture is used for the sequence tagging task such as BI-LSTM-CRF, and different pre-trained language models are combined in the embedding layer to decide which combination of them scores better.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="marinova-2019-evaluation">
<titleInfo>
<title>Evaluation of Stacked Embeddings for Bulgarian on the Downstream Tasks POS and NERC</title>
</titleInfo>
<name type="personal">
<namePart type="given">Iva</namePart>
<namePart type="family">Marinova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2019-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Student Research Workshop Associated with RANLP 2019</title>
</titleInfo>
<name type="personal">
<namePart type="given">Venelin</namePart>
<namePart type="family">Kovatchev</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Irina</namePart>
<namePart type="family">Temnikova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Branislava</namePart>
<namePart type="family">Šandrih</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ivelina</namePart>
<namePart type="family">Nikolova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>INCOMA Ltd.</publisher>
<place>
<placeTerm type="text">Varna, Bulgaria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper reports on experiments with different stacks of word embeddings and evaluation of their usefulness for Bulgarian downstream tasks such as Named Entity Recognition and Classification (NERC) and Part-of-speech (POS) Tagging. Word embeddings stay in the core of the development of NLP, with several key language models being created over the last two years like FastText (CITATION), ElMo (CITATION), BERT (CITATION) and Flair (CITATION). Stacking or combining different word embeddings is another technique used in this paper and still not reported for Bulgarian NERC. Well-established architecture is used for the sequence tagging task such as BI-LSTM-CRF, and different pre-trained language models are combined in the embedding layer to decide which combination of them scores better.</abstract>
<identifier type="citekey">marinova-2019-evaluation</identifier>
<identifier type="doi">10.26615/issn.2603-2821.2019_008</identifier>
<location>
<url>https://aclanthology.org/R19-2008</url>
</location>
<part>
<date>2019-09</date>
<extent unit="page">
<start>48</start>
<end>54</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Evaluation of Stacked Embeddings for Bulgarian on the Downstream Tasks POS and NERC
%A Marinova, Iva
%Y Kovatchev, Venelin
%Y Temnikova, Irina
%Y Šandrih, Branislava
%Y Nikolova, Ivelina
%S Proceedings of the Student Research Workshop Associated with RANLP 2019
%D 2019
%8 September
%I INCOMA Ltd.
%C Varna, Bulgaria
%F marinova-2019-evaluation
%X This paper reports on experiments with different stacks of word embeddings and evaluation of their usefulness for Bulgarian downstream tasks such as Named Entity Recognition and Classification (NERC) and Part-of-speech (POS) Tagging. Word embeddings stay in the core of the development of NLP, with several key language models being created over the last two years like FastText (CITATION), ElMo (CITATION), BERT (CITATION) and Flair (CITATION). Stacking or combining different word embeddings is another technique used in this paper and still not reported for Bulgarian NERC. Well-established architecture is used for the sequence tagging task such as BI-LSTM-CRF, and different pre-trained language models are combined in the embedding layer to decide which combination of them scores better.
%R 10.26615/issn.2603-2821.2019_008
%U https://aclanthology.org/R19-2008
%U https://doi.org/10.26615/issn.2603-2821.2019_008
%P 48-54
Markdown (Informal)
[Evaluation of Stacked Embeddings for Bulgarian on the Downstream Tasks POS and NERC](https://aclanthology.org/R19-2008) (Marinova, RANLP 2019)
ACL