@inproceedings{aziz-schulz-2018-variational,
title = "Variational Inference and Deep Generative Models",
author = "Aziz, Wilker and
Schulz, Philip",
editor = "Artzi, Yoav and
Eisenstein, Jacob",
booktitle = "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics: Tutorial Abstracts",
month = jul,
year = "2018",
address = "Melbourne, Australia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/P18-5003/",
doi = "10.18653/v1/P18-5003",
pages = "8--9",
abstract = "NLP has seen a surge in neural network models in recent years. These models provide state-of-the-art performance on many supervised tasks. Unsupervised and semi-supervised learning has only been addressed scarcely, however. Deep generative models (DGMs) make it possible to integrate neural networks with probabilistic graphical models. Using DGMs one can easily design latent variable models that account for missing observations and thereby enable unsupervised and semi-supervised learning with neural networks. The method of choice for training these models is variational inference. This tutorial offers a general introduction to variational inference followed by a thorough and example-driven discussion of how to use variational methods for training DGMs. It provides both the mathematical background necessary for deriving the learning algorithms as well as practical implementation guidelines. Importantly, the tutorial will cover models with continuous and discrete variables. We provide practical coding exercises implemented in IPython notebooks as well as short notes on the more intricate mathematical details that the audience can use as a reference after the tutorial. We expect that with these additional materials the tutorial will have a long-lasting impact on the community."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="aziz-schulz-2018-variational">
    <titleInfo>
      <title>Variational Inference and Deep Generative Models</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Wilker</namePart>
      <namePart type="family">Aziz</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Philip</namePart>
      <namePart type="family">Schulz</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2018-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics: Tutorial Abstracts</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Yoav</namePart>
        <namePart type="family">Artzi</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Jacob</namePart>
        <namePart type="family">Eisenstein</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Melbourne, Australia</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>NLP has seen a surge in neural network models in recent years. These models provide state-of-the-art performance on many supervised tasks. Unsupervised and semi-supervised learning, however, have scarcely been addressed. Deep generative models (DGMs) make it possible to integrate neural networks with probabilistic graphical models. Using DGMs, one can easily design latent variable models that account for missing observations and thereby enable unsupervised and semi-supervised learning with neural networks. The method of choice for training these models is variational inference. This tutorial offers a general introduction to variational inference, followed by a thorough and example-driven discussion of how to use variational methods for training DGMs. It provides both the mathematical background necessary for deriving the learning algorithms and practical implementation guidelines. Importantly, the tutorial will cover models with continuous and discrete variables. We provide practical coding exercises implemented in IPython notebooks, as well as short notes on the more intricate mathematical details that the audience can use as a reference after the tutorial. We expect that, with these additional materials, the tutorial will have a long-lasting impact on the community.</abstract>
<identifier type="citekey">aziz-schulz-2018-variational</identifier>
<identifier type="doi">10.18653/v1/P18-5003</identifier>
<location>
<url>https://aclanthology.org/P18-5003/</url>
</location>
<part>
<date>2018-07</date>
<extent unit="page">
<start>8</start>
<end>9</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Variational Inference and Deep Generative Models
%A Aziz, Wilker
%A Schulz, Philip
%Y Artzi, Yoav
%Y Eisenstein, Jacob
%S Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics: Tutorial Abstracts
%D 2018
%8 July
%I Association for Computational Linguistics
%C Melbourne, Australia
%F aziz-schulz-2018-variational
%X NLP has seen a surge in neural network models in recent years. These models provide state-of-the-art performance on many supervised tasks. Unsupervised and semi-supervised learning, however, have scarcely been addressed. Deep generative models (DGMs) make it possible to integrate neural networks with probabilistic graphical models. Using DGMs, one can easily design latent variable models that account for missing observations and thereby enable unsupervised and semi-supervised learning with neural networks. The method of choice for training these models is variational inference. This tutorial offers a general introduction to variational inference, followed by a thorough and example-driven discussion of how to use variational methods for training DGMs. It provides both the mathematical background necessary for deriving the learning algorithms and practical implementation guidelines. Importantly, the tutorial will cover models with continuous and discrete variables. We provide practical coding exercises implemented in IPython notebooks, as well as short notes on the more intricate mathematical details that the audience can use as a reference after the tutorial. We expect that, with these additional materials, the tutorial will have a long-lasting impact on the community.
%R 10.18653/v1/P18-5003
%U https://aclanthology.org/P18-5003/
%U https://doi.org/10.18653/v1/P18-5003
%P 8-9
Markdown (Informal)
[Variational Inference and Deep Generative Models](https://aclanthology.org/P18-5003/) (Aziz & Schulz, ACL 2018)
ACL
Wilker Aziz and Philip Schulz. 2018. Variational Inference and Deep Generative Models. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics: Tutorial Abstracts, pages 8–9, Melbourne, Australia. Association for Computational Linguistics.
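
For readers who want the key formula behind the abstract above: variational inference trains a DGM by maximizing the evidence lower bound (ELBO) rather than the intractable marginal likelihood. A minimal statement in standard notation (the symbols below are conventional, not taken from the tutorial materials): for a model p_theta(x, z) with observed x, latent z, and approximate posterior q_phi(z | x),

\log p_\theta(x) \;\ge\; \mathbb{E}_{q_\phi(z \mid x)}\!\left[\log p_\theta(x \mid z)\right] \;-\; \mathrm{KL}\!\left(q_\phi(z \mid x) \,\|\, p(z)\right) \;=\; \mathrm{ELBO}(\theta, \phi; x)

Maximizing this bound jointly in theta and phi yields the learning algorithms the tutorial derives. For continuous z, gradients of the expectation are typically estimated with the reparameterization trick; discrete z calls for score-function (REINFORCE-style) estimators instead, which is why the abstract stresses covering models with both continuous and discrete variables.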