@inproceedings{das-etal-2022-enolp,
title = "Enolp musk@{SMM}4{H}{'}22 : Leveraging Pre-trained Language Models for Stance And Premise Classification",
author = "Das, Millon and
Mangrulkar, Archit and
Manchanda, Ishan and
Kapadnis, Manav and
Patnaik, Sohan",
editor = "Gonzalez-Hernandez, Graciela and
Weissenbacher, Davy",
booktitle = "Proceedings of The Seventh Workshop on Social Media Mining for Health Applications, Workshop {\&} Shared Task",
month = oct,
year = "2022",
address = "Gyeongju, Republic of Korea",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.smm4h-1.42",
pages = "156--159",
abstract = "This paper covers our approaches for the Social Media Mining for Health (SMM4H) Shared Tasks 2a and 2b. Apart from the baseline architectures, we experiment with Parts of Speech (PoS), dependency parsing, and Tf-Idf features. Additionally, we perform contrastive pretraining on our best models using a supervised contrastive loss function. In both the tasks, we outperformed the mean and median scores and ranked first on the validation set. For stance classification, we achieved an F1-score of 0.636 using the CovidTwitterBERT model, while for premise classification, we achieved an F1-score of 0.664 using BART-base model on test dataset.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="das-etal-2022-enolp">
    <titleInfo>
        <title>Enolp musk@SMM4H’22 : Leveraging Pre-trained Language Models for Stance And Premise Classification</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Millon</namePart>
        <namePart type="family">Das</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Archit</namePart>
        <namePart type="family">Mangrulkar</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Ishan</namePart>
        <namePart type="family">Manchanda</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Manav</namePart>
        <namePart type="family">Kapadnis</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Sohan</namePart>
        <namePart type="family">Patnaik</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2022-10</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of The Seventh Workshop on Social Media Mining for Health Applications, Workshop &amp; Shared Task</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Graciela</namePart>
            <namePart type="family">Gonzalez-Hernandez</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Davy</namePart>
            <namePart type="family">Weissenbacher</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Gyeongju, Republic of Korea</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>This paper covers our approaches for the Social Media Mining for Health (SMM4H) Shared Tasks 2a and 2b. Apart from the baseline architectures, we experiment with Parts of Speech (PoS), dependency parsing, and Tf-Idf features. Additionally, we perform contrastive pretraining on our best models using a supervised contrastive loss function. In both tasks, we outperformed the mean and median scores and ranked first on the validation set. For stance classification, we achieved an F1-score of 0.636 using the CovidTwitterBERT model, while for premise classification, we achieved an F1-score of 0.664 using the BART-base model on the test dataset.</abstract>
    <identifier type="citekey">das-etal-2022-enolp</identifier>
    <location>
        <url>https://aclanthology.org/2022.smm4h-1.42</url>
    </location>
    <part>
        <date>2022-10</date>
        <extent unit="page">
            <start>156</start>
            <end>159</end>
        </extent>
    </part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Enolp musk@SMM4H’22 : Leveraging Pre-trained Language Models for Stance And Premise Classification
%A Das, Millon
%A Mangrulkar, Archit
%A Manchanda, Ishan
%A Kapadnis, Manav
%A Patnaik, Sohan
%Y Gonzalez-Hernandez, Graciela
%Y Weissenbacher, Davy
%S Proceedings of The Seventh Workshop on Social Media Mining for Health Applications, Workshop & Shared Task
%D 2022
%8 October
%I Association for Computational Linguistics
%C Gyeongju, Republic of Korea
%F das-etal-2022-enolp
%X This paper covers our approaches for the Social Media Mining for Health (SMM4H) Shared Tasks 2a and 2b. Apart from the baseline architectures, we experiment with Parts of Speech (PoS), dependency parsing, and Tf-Idf features. Additionally, we perform contrastive pretraining on our best models using a supervised contrastive loss function. In both tasks, we outperformed the mean and median scores and ranked first on the validation set. For stance classification, we achieved an F1-score of 0.636 using the CovidTwitterBERT model, while for premise classification, we achieved an F1-score of 0.664 using the BART-base model on the test dataset.
%U https://aclanthology.org/2022.smm4h-1.42
%P 156-159
Markdown (Informal)
[Enolp musk@SMM4H’22 : Leveraging Pre-trained Language Models for Stance And Premise Classification](https://aclanthology.org/2022.smm4h-1.42) (Das et al., SMM4H 2022)
ACL
Millon Das, Archit Mangrulkar, Ishan Manchanda, Manav Kapadnis, and Sohan Patnaik. 2022. [Enolp musk@SMM4H’22 : Leveraging Pre-trained Language Models for Stance And Premise Classification](https://aclanthology.org/2022.smm4h-1.42). In *Proceedings of The Seventh Workshop on Social Media Mining for Health Applications, Workshop & Shared Task*, pages 156–159, Gyeongju, Republic of Korea. Association for Computational Linguistics.
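
The abstract mentions contrastive pretraining with a supervised contrastive loss but gives no implementation details. As a hedged illustration only, and not the authors' code, the sketch below implements the batch-wise supervised contrastive (SupCon) objective of Khosla et al. (2020) in PyTorch; the function name, the 0.07 temperature, and the in-batch masking scheme are assumptions.

```python
import torch
import torch.nn.functional as F

def supervised_contrastive_loss(features: torch.Tensor,
                                labels: torch.Tensor,
                                temperature: float = 0.07) -> torch.Tensor:
    """Supervised contrastive (SupCon) loss over one batch.

    features: (batch, dim) encoder embeddings; labels: (batch,) class ids.
    The 0.07 temperature is an assumed default, not taken from the paper.
    """
    features = F.normalize(features, dim=1)      # cosine similarity geometry
    sim = features @ features.T / temperature
    batch = features.size(0)
    self_mask = torch.eye(batch, dtype=torch.bool, device=features.device)
    sim = sim.masked_fill(self_mask, -1e9)       # exclude self-pairs
    # Positives: the other in-batch examples sharing the anchor's label.
    pos_mask = (labels.unsqueeze(0) == labels.unsqueeze(1)) & ~self_mask
    log_prob = sim - torch.logsumexp(sim, dim=1, keepdim=True)
    # Average log-probability over each anchor's positives, skipping
    # anchors that have no positive in the batch.
    pos_counts = pos_mask.sum(dim=1)
    valid = pos_counts > 0
    mean_log_prob_pos = (log_prob * pos_mask).sum(dim=1)[valid] / pos_counts[valid]
    return -mean_log_prob_pos.mean()
```

In a setup like the one the abstract describes, a loss of this shape would typically be applied to pooled sentence embeddings from the backbone (here, CovidTwitterBERT or BART-base) during a pretraining pass, before fine-tuning with the usual classification head; how the authors wired it in is not specified on this page.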