@inproceedings{paev-etal-2024-introducing,
title = "Introducing Shallow Syntactic Information within the Graph-based Dependency Parsing",
author = "Paev, Nikolay and
Simov, Kiril and
Osenova, Petya",
editor = {Dakota, Daniel and
Jablotschkin, Sarah and
K{\"u}bler, Sandra and
Zinsmeister, Heike},
booktitle = "Proceedings of the 22nd Workshop on Treebanks and Linguistic Theories (TLT 2024)",
month = dec,
year = "2024",
address = "Hamburg, Germany",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.tlt-1.6/",
pages = "46--54",
abstract = "The paper presents a new BERT model, fine-tuned for parsing of Bulgarian texts. This model is extended with a new neural network layer in order to incorporate shallow syntactic information during the training phase. The results show statistically significant improvement over the baseline. Thus, the addition of syntactic knowledge - even partial - makes the model better. Also, some error analysis has been conducted on the results from the parsers. Although the architecture has been designed and tested for Bulgarian, it is also scalable for other languages. This scalability was shown here with some experiments and evaluation on an English treebank with a comparable size."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="paev-etal-2024-introducing">
<titleInfo>
<title>Introducing Shallow Syntactic Information within the Graph-based Dependency Parsing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Nikolay</namePart>
<namePart type="family">Paev</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kiril</namePart>
<namePart type="family">Simov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Petya</namePart>
<namePart type="family">Osenova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 22nd Workshop on Treebanks and Linguistic Theories (TLT 2024)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Daniel</namePart>
<namePart type="family">Dakota</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sarah</namePart>
<namePart type="family">Jablotschkin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sandra</namePart>
<namePart type="family">Kübler</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Heike</namePart>
<namePart type="family">Zinsmeister</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Hamburg, Germany</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The paper presents a new BERT model, fine-tuned for parsing of Bulgarian texts. This model is extended with a new neural network layer in order to incorporate shallow syntactic information during the training phase. The results show statistically significant improvement over the baseline. Thus, the addition of syntactic knowledge - even partial - makes the model better. Also, some error analysis has been conducted on the results from the parsers. Although the architecture has been designed and tested for Bulgarian, it is also scalable for other languages. This scalability was shown here with some experiments and evaluation on an English treebank with a comparable size.</abstract>
<identifier type="citekey">paev-etal-2024-introducing</identifier>
<location>
<url>https://aclanthology.org/2024.tlt-1.6/</url>
</location>
<part>
<date>2024-12</date>
<extent unit="page">
<start>46</start>
<end>54</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Introducing Shallow Syntactic Information within the Graph-based Dependency Parsing
%A Paev, Nikolay
%A Simov, Kiril
%A Osenova, Petya
%Y Dakota, Daniel
%Y Jablotschkin, Sarah
%Y Kübler, Sandra
%Y Zinsmeister, Heike
%S Proceedings of the 22nd Workshop on Treebanks and Linguistic Theories (TLT 2024)
%D 2024
%8 December
%I Association for Computational Linguistics
%C Hamburg, Germany
%F paev-etal-2024-introducing
%X The paper presents a new BERT model, fine-tuned for parsing of Bulgarian texts. This model is extended with a new neural network layer in order to incorporate shallow syntactic information during the training phase. The results show statistically significant improvement over the baseline. Thus, the addition of syntactic knowledge - even partial - makes the model better. Also, some error analysis has been conducted on the results from the parsers. Although the architecture has been designed and tested for Bulgarian, it is also scalable for other languages. This scalability was shown here with some experiments and evaluation on an English treebank with a comparable size.
%U https://aclanthology.org/2024.tlt-1.6/
%P 46-54
Markdown (Informal)
[Introducing Shallow Syntactic Information within the Graph-based Dependency Parsing](https://aclanthology.org/2024.tlt-1.6/) (Paev et al., TLT 2024)
ACL