@article{keya-etal-2022-neural,
title = "Neural Embedding Allocation: Distributed Representations of Topic Models",
author = "Keya, Kamrun Naher and
Papanikolaou, Yannis and
Foulds, James R.",
journal = "Computational Linguistics",
volume = "48",
number = "4",
month = dec,
year = "2022",
address = "Cambridge, MA",
publisher = "MIT Press",
url = "https://aclanthology.org/2022.cl-4.18",
doi = "10.1162/coli_a_00457",
pages = "1021--1052",
abstract = "We propose a method that uses neural embeddings to improve the performance of any given LDA-style topic model. Our method, called neural embedding allocation (NEA), deconstructs topic models (LDA or otherwise) into interpretable vector-space embeddings of words, topics, documents, authors, and so on, by learning neural embeddings to mimic the topic model. We demonstrate that NEA improves coherence scores of the original topic model by smoothing out the noisy topics when the number of topics is large. Furthermore, we show NEA{'}s effectiveness and generality in deconstructing and smoothing LDA, author-topic models, and the recent mixed membership skip-gram topic model and achieve better performance with the embeddings compared to several state-of-the-art models.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="keya-etal-2022-neural">
    <titleInfo>
      <title>Neural Embedding Allocation: Distributed Representations of Topic Models</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Kamrun</namePart>
      <namePart type="given">Naher</namePart>
      <namePart type="family">Keya</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yannis</namePart>
      <namePart type="family">Papanikolaou</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">James</namePart>
      <namePart type="given">R</namePart>
      <namePart type="family">Foulds</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2022-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <genre authority="bibutilsgt">journal article</genre>
    <relatedItem type="host">
      <titleInfo>
        <title>Computational Linguistics</title>
      </titleInfo>
      <originInfo>
        <issuance>continuing</issuance>
        <publisher>MIT Press</publisher>
        <place>
          <placeTerm type="text">Cambridge, MA</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">periodical</genre>
      <genre authority="bibutilsgt">academic journal</genre>
    </relatedItem>
    <abstract>We propose a method that uses neural embeddings to improve the performance of any given LDA-style topic model. Our method, called neural embedding allocation (NEA), deconstructs topic models (LDA or otherwise) into interpretable vector-space embeddings of words, topics, documents, authors, and so on, by learning neural embeddings to mimic the topic model. We demonstrate that NEA improves coherence scores of the original topic model by smoothing out the noisy topics when the number of topics is large. Furthermore, we show NEA’s effectiveness and generality in deconstructing and smoothing LDA, author-topic models, and the recent mixed membership skip-gram topic model and achieve better performance with the embeddings compared to several state-of-the-art models.</abstract>
    <identifier type="citekey">keya-etal-2022-neural</identifier>
    <identifier type="doi">10.1162/coli_a_00457</identifier>
    <location>
      <url>https://aclanthology.org/2022.cl-4.18</url>
    </location>
    <part>
      <date>2022-12</date>
      <detail type="volume"><number>48</number></detail>
      <detail type="issue"><number>4</number></detail>
      <extent unit="page">
        <start>1021</start>
        <end>1052</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Journal Article
%T Neural Embedding Allocation: Distributed Representations of Topic Models
%A Keya, Kamrun Naher
%A Papanikolaou, Yannis
%A Foulds, James R.
%J Computational Linguistics
%D 2022
%8 December
%V 48
%N 4
%I MIT Press
%C Cambridge, MA
%F keya-etal-2022-neural
%X We propose a method that uses neural embeddings to improve the performance of any given LDA-style topic model. Our method, called neural embedding allocation (NEA), deconstructs topic models (LDA or otherwise) into interpretable vector-space embeddings of words, topics, documents, authors, and so on, by learning neural embeddings to mimic the topic model. We demonstrate that NEA improves coherence scores of the original topic model by smoothing out the noisy topics when the number of topics is large. Furthermore, we show NEA’s effectiveness and generality in deconstructing and smoothing LDA, author-topic models, and the recent mixed membership skip-gram topic model and achieve better performance with the embeddings compared to several state-of-the-art models.
%R 10.1162/coli_a_00457
%U https://aclanthology.org/2022.cl-4.18
%U https://doi.org/10.1162/coli_a_00457
%P 1021-1052
Markdown (Informal)
[Neural Embedding Allocation: Distributed Representations of Topic Models](https://aclanthology.org/2022.cl-4.18) (Keya et al., CL 2022)
ACL
Kamrun Naher Keya, Yannis Papanikolaou, and James R. Foulds. 2022. Neural Embedding Allocation: Distributed Representations of Topic Models. Computational Linguistics, 48(4):1021–1052.
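
For readers who want a concrete picture of the "learning neural embeddings to mimic the topic model" idea summarized in the abstract, here is a minimal, self-contained sketch: it fits word and topic embeddings so that a softmax over their inner products reproduces a topic-word matrix standing in for a fitted LDA model. The array sizes, the synthetic phi, and the plain cross-entropy gradient fit are illustrative assumptions, not NEA's exact formulation.

# Minimal sketch (assumptions, not the paper's exact objective): learn word and
# topic embeddings whose softmaxed inner products mimic a topic model's
# topic-word distributions.
import numpy as np

rng = np.random.default_rng(0)
V, K, D = 1000, 50, 25                    # vocabulary size, topics, embedding dimension
phi = rng.dirichlet(np.full(V, 0.1), K)   # synthetic stand-in for LDA's topic-word matrix

W = 0.01 * rng.standard_normal((V, D))    # word embeddings to learn
T = 0.01 * rng.standard_normal((K, D))    # topic embeddings to learn

def softmax(z):
    z = z - z.max(axis=-1, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=-1, keepdims=True)

lr = 0.5
for step in range(2000):
    q = softmax(T @ W.T)                  # (K, V) reconstruction of phi from embeddings
    g = (q - phi) / K                     # gradient of mean cross-entropy w.r.t. the logits
    gT, gW = g @ W, g.T @ T               # backprop into topic and word embeddings
    T -= lr * gT
    W -= lr * gW
    if step % 500 == 0:                   # cross-entropy between phi and its reconstruction
        print(step, -np.mean(np.sum(phi * np.log(q + 1e-12), axis=1)))

In the abstract's framing, the same mimicking recipe extends to documents, authors, and other topic-model variables; the sketch above covers only the topic-word piece.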