@inproceedings{portelli-etal-2022-ailab,
title = "{AILAB}-{U}dine@{SMM}4{H}{'}22: Limits of Transformers and {BERT} Ensembles",
author = "Portelli, Beatrice and
Scaboro, Simone and
Chersoni, Emmanuele and
Santus, Enrico and
Serra, Giuseppe",
editor = "Gonzalez-Hernandez, Graciela and
Weissenbacher, Davy",
booktitle = "Proceedings of The Seventh Workshop on Social Media Mining for Health Applications, Workshop {\&} Shared Task",
month = oct,
year = "2022",
address = "Gyeongju, Republic of Korea",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.smm4h-1.36",
pages = "130--134",
abstract = "This paper describes the models developed by the AILAB-Udine team for the SMM4H{'}22 Shared Task. We explored the limits of Transformer based models on text classification, entity extraction and entity normalization, tackling Tasks 1, 2, 5, 6 and 10. The main takeaways we got from participating in different tasks are: the overwhelming positive effects of combining different architectures when using ensemble learning, and the great potential of generative models for term normalization.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="portelli-etal-2022-ailab">
<titleInfo>
<title>AILAB-Udine@SMM4H’22: Limits of Transformers and BERT Ensembles</title>
</titleInfo>
<name type="personal">
<namePart type="given">Beatrice</namePart>
<namePart type="family">Portelli</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Simone</namePart>
<namePart type="family">Scaboro</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Emmanuele</namePart>
<namePart type="family">Chersoni</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Enrico</namePart>
<namePart type="family">Santus</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Giuseppe</namePart>
<namePart type="family">Serra</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-10</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of The Seventh Workshop on Social Media Mining for Health Applications, Workshop &amp; Shared Task</title>
</titleInfo>
<name type="personal">
<namePart type="given">Graciela</namePart>
<namePart type="family">Gonzalez-Hernandez</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Davy</namePart>
<namePart type="family">Weissenbacher</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Gyeongju, Republic of Korea</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper describes the models developed by the AILAB-Udine team for the SMM4H’22 Shared Task. We explored the limits of Transformer based models on text classification, entity extraction and entity normalization, tackling Tasks 1, 2, 5, 6 and 10. The main takeaways we got from participating in different tasks are: the overwhelming positive effects of combining different architectures when using ensemble learning, and the great potential of generative models for term normalization.</abstract>
<identifier type="citekey">portelli-etal-2022-ailab</identifier>
<location>
<url>https://aclanthology.org/2022.smm4h-1.36</url>
</location>
<part>
<date>2022-10</date>
<extent unit="page">
<start>130</start>
<end>134</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T AILAB-Udine@SMM4H’22: Limits of Transformers and BERT Ensembles
%A Portelli, Beatrice
%A Scaboro, Simone
%A Chersoni, Emmanuele
%A Santus, Enrico
%A Serra, Giuseppe
%Y Gonzalez-Hernandez, Graciela
%Y Weissenbacher, Davy
%S Proceedings of The Seventh Workshop on Social Media Mining for Health Applications, Workshop & Shared Task
%D 2022
%8 October
%I Association for Computational Linguistics
%C Gyeongju, Republic of Korea
%F portelli-etal-2022-ailab
%X This paper describes the models developed by the AILAB-Udine team for the SMM4H’22 Shared Task. We explored the limits of Transformer based models on text classification, entity extraction and entity normalization, tackling Tasks 1, 2, 5, 6 and 10. The main takeaways we got from participating in different tasks are: the overwhelming positive effects of combining different architectures when using ensemble learning, and the great potential of generative models for term normalization.
%U https://aclanthology.org/2022.smm4h-1.36
%P 130-134
Markdown (Informal)
[AILAB-Udine@SMM4H’22: Limits of Transformers and BERT Ensembles](https://aclanthology.org/2022.smm4h-1.36) (Portelli et al., SMM4H 2022)
ACL
- Beatrice Portelli, Simone Scaboro, Emmanuele Chersoni, Enrico Santus, and Giuseppe Serra. 2022. AILAB-Udine@SMM4H’22: Limits of Transformers and BERT Ensembles. In Proceedings of The Seventh Workshop on Social Media Mining for Health Applications, Workshop & Shared Task, pages 130–134, Gyeongju, Republic of Korea. Association for Computational Linguistics.