BibTeX
@inproceedings{adhya-etal-2023-neural,
title = "Do Neural Topic Models Really Need Dropout? Analysis of the Effect of Dropout in Topic Modeling",
author = "Adhya, Suman and
Lahiri, Avishek and
Sanyal, Debarshi Kumar",
editor = "Vlachos, Andreas and
Augenstein, Isabelle",
booktitle = "Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics",
month = may,
year = "2023",
address = "Dubrovnik, Croatia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.eacl-main.162",
doi = "10.18653/v1/2023.eacl-main.162",
pages = "2220--2229",
    abstract = "Dropout is a widely used regularization technique for mitigating overfitting in large feedforward neural networks, which, when trained on small datasets, perform poorly on held-out test data. Although the effectiveness of this technique has been studied extensively for convolutional neural networks, there is little analysis of it for unsupervised models, in particular VAE-based neural topic models. In this paper, we analyze the consequences of dropout in the encoder as well as in the decoder of the VAE architecture in three widely used neural topic models, namely the contextualized topic model (CTM), ProdLDA, and the embedded topic model (ETM), using four publicly available datasets. We characterize the effect of dropout on these models in terms of the quality and predictive performance of the generated topics.",
}
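The abstract describes two dropout sites in the VAE architecture of these topic models: one in the encoder (the inference network that maps a bag-of-words vector to the variational posterior) and one in the decoder (which maps the sampled topic proportions back to the vocabulary). Below is a minimal PyTorch sketch of a ProdLDA-style VAE marking both sites; it is an illustrative reconstruction, not the authors' code, and the class name, layer sizes, and default dropout rates are assumptions.

# Illustrative sketch (not the paper's implementation) of the two
# dropout sites the paper studies: one in the VAE encoder, one in the decoder.
import torch
import torch.nn as nn
import torch.nn.functional as F

class ProdLDAStyleVAE(nn.Module):  # hypothetical name and sizes, for illustration
    def __init__(self, vocab_size, num_topics, hidden=100,
                 encoder_dropout=0.2, decoder_dropout=0.2):
        super().__init__()
        self.fc1 = nn.Linear(vocab_size, hidden)
        self.fc2 = nn.Linear(hidden, hidden)
        self.enc_drop = nn.Dropout(encoder_dropout)  # dropout site 1: encoder
        self.mu = nn.Linear(hidden, num_topics)
        self.logvar = nn.Linear(hidden, num_topics)
        self.dec_drop = nn.Dropout(decoder_dropout)  # dropout site 2: decoder
        self.beta = nn.Linear(num_topics, vocab_size, bias=False)  # topic-word matrix

    def encode(self, bow):
        h = F.softplus(self.fc1(bow))
        h = F.softplus(self.fc2(h))
        h = self.enc_drop(h)  # encoder dropout, applied to the hidden representation
        return self.mu(h), self.logvar(h)

    def decode(self, z):
        theta = F.softmax(z, dim=-1)  # document-topic proportions
        theta = self.dec_drop(theta)  # decoder dropout, applied to the proportions
        return F.log_softmax(self.beta(theta), dim=-1)  # product-of-experts output

    def forward(self, bow):
        mu, logvar = self.encode(bow)
        std = torch.exp(0.5 * logvar)
        z = mu + std * torch.randn_like(std)  # reparameterization trick
        return self.decode(z), mu, logvar

Setting encoder_dropout or decoder_dropout to 0.0 disables the corresponding site, which is the comparison the paper's analysis turns on.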
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="adhya-etal-2023-neural">
<titleInfo>
<title>Do Neural Topic Models Really Need Dropout? Analysis of the Effect of Dropout in Topic Modeling</title>
</titleInfo>
<name type="personal">
<namePart type="given">Suman</namePart>
<namePart type="family">Adhya</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Avishek</namePart>
<namePart type="family">Lahiri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Debarshi</namePart>
<namePart type="given">Kumar</namePart>
<namePart type="family">Sanyal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics</title>
</titleInfo>
<name type="personal">
<namePart type="given">Andreas</namePart>
<namePart type="family">Vlachos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Isabelle</namePart>
<namePart type="family">Augenstein</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dubrovnik, Croatia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
  <abstract>Dropout is a widely used regularization technique for mitigating overfitting in large feedforward neural networks, which, when trained on small datasets, perform poorly on held-out test data. Although the effectiveness of this technique has been studied extensively for convolutional neural networks, there is little analysis of it for unsupervised models, in particular VAE-based neural topic models. In this paper, we analyze the consequences of dropout in the encoder as well as in the decoder of the VAE architecture in three widely used neural topic models, namely the contextualized topic model (CTM), ProdLDA, and the embedded topic model (ETM), using four publicly available datasets. We characterize the effect of dropout on these models in terms of the quality and predictive performance of the generated topics.</abstract>
<identifier type="citekey">adhya-etal-2023-neural</identifier>
<identifier type="doi">10.18653/v1/2023.eacl-main.162</identifier>
<location>
<url>https://aclanthology.org/2023.eacl-main.162</url>
</location>
<part>
<date>2023-05</date>
<extent unit="page">
<start>2220</start>
<end>2229</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Do Neural Topic Models Really Need Dropout? Analysis of the Effect of Dropout in Topic Modeling
%A Adhya, Suman
%A Lahiri, Avishek
%A Sanyal, Debarshi Kumar
%Y Vlachos, Andreas
%Y Augenstein, Isabelle
%S Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics
%D 2023
%8 May
%I Association for Computational Linguistics
%C Dubrovnik, Croatia
%F adhya-etal-2023-neural
%X Dropout is a widely used regularization technique for mitigating overfitting in large feedforward neural networks, which, when trained on small datasets, perform poorly on held-out test data. Although the effectiveness of this technique has been studied extensively for convolutional neural networks, there is little analysis of it for unsupervised models, in particular VAE-based neural topic models. In this paper, we analyze the consequences of dropout in the encoder as well as in the decoder of the VAE architecture in three widely used neural topic models, namely the contextualized topic model (CTM), ProdLDA, and the embedded topic model (ETM), using four publicly available datasets. We characterize the effect of dropout on these models in terms of the quality and predictive performance of the generated topics.
%R 10.18653/v1/2023.eacl-main.162
%U https://aclanthology.org/2023.eacl-main.162
%U https://doi.org/10.18653/v1/2023.eacl-main.162
%P 2220-2229
Markdown (Informal)
[Do Neural Topic Models Really Need Dropout? Analysis of the Effect of Dropout in Topic Modeling](https://aclanthology.org/2023.eacl-main.162) (Adhya et al., EACL 2023)
ACL
Suman Adhya, Avishek Lahiri, and Debarshi Kumar Sanyal. 2023. Do Neural Topic Models Really Need Dropout? Analysis of the Effect of Dropout in Topic Modeling. In Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics, pages 2220–2229, Dubrovnik, Croatia. Association for Computational Linguistics.