@inproceedings{pereg-etal-2020-syntactically,
title = "Syntactically Aware Cross-Domain Aspect and Opinion Terms Extraction",
author = "Pereg, Oren and
Korat, Daniel and
Wasserblat, Moshe",
editor = "Scott, Donia and
Bel, Nuria and
Zong, Chengqing",
booktitle = "Proceedings of the 28th International Conference on Computational Linguistics",
month = dec,
year = "2020",
address = "Barcelona, Spain (Online)",
publisher = "International Committee on Computational Linguistics",
url = "https://aclanthology.org/2020.coling-main.158",
doi = "10.18653/v1/2020.coling-main.158",
pages = "1772--1777",
abstract = "A fundamental task of fine-grained sentiment analysis is aspect and opinion terms extraction. Supervised-learning approaches have shown good results for this task; however, they fail to scale across domains where labeled data is lacking. Non pre-trained unsupervised domain adaptation methods that incorporate external linguistic knowledge have proven effective in transferring aspect and opinion knowledge from a labeled source domain to un-labeled target domains; however, pre-trained transformer-based models like BERT and RoBERTa already exhibit substantial syntactic knowledge. In this paper, we propose a method for incorporating external linguistic information into a self-attention mechanism coupled with the BERT model. This enables leveraging the intrinsic knowledge existing within BERT together with externally introduced syntactic information, to bridge the gap across domains. We successfully demonstrate enhanced results on three benchmark datasets.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="pereg-etal-2020-syntactically">
    <titleInfo>
      <title>Syntactically Aware Cross-Domain Aspect and Opinion Terms Extraction</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Oren</namePart>
      <namePart type="family">Pereg</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Daniel</namePart>
      <namePart type="family">Korat</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Moshe</namePart>
      <namePart type="family">Wasserblat</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 28th International Conference on Computational Linguistics</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Donia</namePart>
        <namePart type="family">Scott</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Nuria</namePart>
        <namePart type="family">Bel</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Chengqing</namePart>
        <namePart type="family">Zong</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>International Committee on Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Barcelona, Spain (Online)</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>A fundamental task of fine-grained sentiment analysis is aspect and opinion terms extraction. Supervised-learning approaches have shown good results for this task; however, they fail to scale across domains where labeled data is lacking. Non-pretrained unsupervised domain adaptation methods that incorporate external linguistic knowledge have proven effective in transferring aspect and opinion knowledge from a labeled source domain to unlabeled target domains; however, pre-trained transformer-based models like BERT and RoBERTa already exhibit substantial syntactic knowledge. In this paper, we propose a method for incorporating external linguistic information into a self-attention mechanism coupled with the BERT model. This enables leveraging the intrinsic knowledge within BERT together with externally introduced syntactic information to bridge the gap across domains. We demonstrate enhanced results on three benchmark datasets.</abstract>
<identifier type="citekey">pereg-etal-2020-syntactically</identifier>
<identifier type="doi">10.18653/v1/2020.coling-main.158</identifier>
<location>
<url>https://aclanthology.org/2020.coling-main.158</url>
</location>
<part>
<date>2020-12</date>
<extent unit="page">
<start>1772</start>
<end>1777</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Syntactically Aware Cross-Domain Aspect and Opinion Terms Extraction
%A Pereg, Oren
%A Korat, Daniel
%A Wasserblat, Moshe
%Y Scott, Donia
%Y Bel, Nuria
%Y Zong, Chengqing
%S Proceedings of the 28th International Conference on Computational Linguistics
%D 2020
%8 December
%I International Committee on Computational Linguistics
%C Barcelona, Spain (Online)
%F pereg-etal-2020-syntactically
%X A fundamental task of fine-grained sentiment analysis is aspect and opinion terms extraction. Supervised-learning approaches have shown good results for this task; however, they fail to scale across domains where labeled data is lacking. Non-pretrained unsupervised domain adaptation methods that incorporate external linguistic knowledge have proven effective in transferring aspect and opinion knowledge from a labeled source domain to unlabeled target domains; however, pre-trained transformer-based models like BERT and RoBERTa already exhibit substantial syntactic knowledge. In this paper, we propose a method for incorporating external linguistic information into a self-attention mechanism coupled with the BERT model. This enables leveraging the intrinsic knowledge within BERT together with externally introduced syntactic information to bridge the gap across domains. We demonstrate enhanced results on three benchmark datasets.
%R 10.18653/v1/2020.coling-main.158
%U https://aclanthology.org/2020.coling-main.158
%U https://doi.org/10.18653/v1/2020.coling-main.158
%P 1772-1777
Markdown (Informal)
[Syntactically Aware Cross-Domain Aspect and Opinion Terms Extraction](https://aclanthology.org/2020.coling-main.158) (Pereg et al., COLING 2020)
ACL
Oren Pereg, Daniel Korat, and Moshe Wasserblat. 2020. Syntactically Aware Cross-Domain Aspect and Opinion Terms Extraction. In Proceedings of the 28th International Conference on Computational Linguistics, pages 1772–1777, Barcelona, Spain (Online). International Committee on Computational Linguistics.
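
For readers who want to experiment with the idea summarized in the abstract, here is a minimal, hypothetical sketch of syntax-biased self-attention in PyTorch. This is not the authors' implementation: the class name SyntaxBiasedSelfAttention, the additive-bias fusion, and the learnable syntax_weight scale are all assumptions made for illustration. It only shows one common way to inject an external dependency-parse adjacency matrix into attention scores computed over BERT-style token representations; consult the paper for the actual architecture.

# Hypothetical sketch, not the paper's method: bias self-attention logits
# with an external syntactic adjacency matrix (e.g. from a dependency parse).
import torch
import torch.nn as nn
import torch.nn.functional as F

class SyntaxBiasedSelfAttention(nn.Module):
    def __init__(self, hidden_size: int, num_heads: int, syntax_weight: float = 1.0):
        super().__init__()
        assert hidden_size % num_heads == 0
        self.num_heads = num_heads
        self.head_dim = hidden_size // num_heads
        self.qkv = nn.Linear(hidden_size, 3 * hidden_size)
        self.out = nn.Linear(hidden_size, hidden_size)
        # Learnable scale for how strongly syntax influences attention (assumption).
        self.syntax_weight = nn.Parameter(torch.tensor(syntax_weight))

    def forward(self, hidden_states: torch.Tensor, syntax_adj: torch.Tensor) -> torch.Tensor:
        # hidden_states: (batch, seq_len, hidden), e.g. BERT token embeddings
        # syntax_adj:    (batch, seq_len, seq_len), 1.0 where two tokens are
        #                linked in the dependency parse, 0.0 elsewhere
        b, t, h = hidden_states.shape
        q, k, v = self.qkv(hidden_states).chunk(3, dim=-1)

        def split(x):  # (batch, seq_len, hidden) -> (batch, heads, seq_len, head_dim)
            return x.view(b, t, self.num_heads, self.head_dim).transpose(1, 2)

        q, k, v = split(q), split(k), split(v)
        scores = q @ k.transpose(-2, -1) / self.head_dim ** 0.5
        # Inject the external syntactic knowledge as an additive bias on the logits.
        scores = scores + self.syntax_weight * syntax_adj.unsqueeze(1)
        attn = F.softmax(scores, dim=-1)
        ctx = (attn @ v).transpose(1, 2).reshape(b, t, h)
        return self.out(ctx)

# Usage: attention is nudged toward dependency-linked token pairs.
layer = SyntaxBiasedSelfAttention(hidden_size=768, num_heads=12)
x = torch.randn(2, 8, 768)                     # stand-in for BERT outputs
adj = torch.zeros(2, 8, 8)
adj[:, torch.arange(8), torch.arange(8)] = 1.0 # trivial self-link "parse"
print(layer(x, adj).shape)                     # torch.Size([2, 8, 768])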