@inproceedings{hong-wu-2021-cyut,
title = "{CYUT} at {ROCLING}-2021 Shared Task: Based on {BERT} and {M}ac{BERT}",
author = "Hong, Xie-Sheng and
Wu, Shih-Hung",
editor = "Lee, Lung-Hao and
Chang, Chia-Hui and
Chen, Kuan-Yu",
booktitle = "Proceedings of the 33rd Conference on Computational Linguistics and Speech Processing (ROCLING 2021)",
month = oct,
year = "2021",
address = "Taoyuan, Taiwan",
publisher = "The Association for Computational Linguistics and Chinese Language Processing (ACLCLP)",
url = "https://aclanthology.org/2021.rocling-1.48",
pages = "367--374",
    abstract = "This paper presents a description of the ROCLING 2021 shared task on dimensional sentiment analysis for educational texts. We submitted two runs in the final test. Both runs use the standard regression model. Run1 uses the Chinese version of BERT as the base, and in Run2 we use the early version of MacBERT, a Chinese RoBERTa-like BERT model, RoBERTa-wwm-ext. We use the powerful pre-trained BERT model for text embedding to help train the model.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="hong-wu-2021-cyut">
<titleInfo>
<title>CYUT at ROCLING-2021 Shared Task: Based on BERT and MacBERT</title>
</titleInfo>
<name type="personal">
<namePart type="given">Xie-Sheng</namePart>
<namePart type="family">Hong</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shih-Hung</namePart>
<namePart type="family">Wu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-10</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 33rd Conference on Computational Linguistics and Speech Processing (ROCLING 2021)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Lung-Hao</namePart>
<namePart type="family">Lee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chia-Hui</namePart>
<namePart type="family">Chang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kuan-Yu</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>The Association for Computational Linguistics and Chinese Language Processing (ACLCLP)</publisher>
<place>
<placeTerm type="text">Taoyuan, Taiwan</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
  <abstract>This paper presents a description of the ROCLING 2021 shared task on dimensional sentiment analysis for educational texts. We submitted two runs in the final test. Both runs use the standard regression model. Run1 uses the Chinese version of BERT as the base, and in Run2 we use the early version of MacBERT, a Chinese RoBERTa-like BERT model, RoBERTa-wwm-ext. We use the powerful pre-trained BERT model for text embedding to help train the model.</abstract>
<identifier type="citekey">hong-wu-2021-cyut</identifier>
<location>
<url>https://aclanthology.org/2021.rocling-1.48</url>
</location>
<part>
<date>2021-10</date>
<extent unit="page">
<start>367</start>
<end>374</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T CYUT at ROCLING-2021 Shared Task: Based on BERT and MacBERT
%A Hong, Xie-Sheng
%A Wu, Shih-Hung
%Y Lee, Lung-Hao
%Y Chang, Chia-Hui
%Y Chen, Kuan-Yu
%S Proceedings of the 33rd Conference on Computational Linguistics and Speech Processing (ROCLING 2021)
%D 2021
%8 October
%I The Association for Computational Linguistics and Chinese Language Processing (ACLCLP)
%C Taoyuan, Taiwan
%F hong-wu-2021-cyut
%X This paper presents a description of the ROCLING 2021 shared task on dimensional sentiment analysis for educational texts. We submitted two runs in the final test. Both runs use the standard regression model. Run1 uses the Chinese version of BERT as the base, and in Run2 we use the early version of MacBERT, a Chinese RoBERTa-like BERT model, RoBERTa-wwm-ext. We use the powerful pre-trained BERT model for text embedding to help train the model.
%U https://aclanthology.org/2021.rocling-1.48
%P 367-374
Markdown (Informal)
[CYUT at ROCLING-2021 Shared Task: Based on BERT and MacBERT](https://aclanthology.org/2021.rocling-1.48) (Hong & Wu, ROCLING 2021)
ACL
- Xie-Sheng Hong and Shih-Hung Wu. 2021. CYUT at ROCLING-2021 Shared Task: Based on BERT and MacBERT. In Proceedings of the 33rd Conference on Computational Linguistics and Speech Processing (ROCLING 2021), pages 367–374, Taoyuan, Taiwan. The Association for Computational Linguistics and Chinese Language Processing (ACLCLP).