@inproceedings{prokhorov-etal-2019-importance,
    title = "On the Importance of the {Kullback}-{Leibler} Divergence Term in Variational Autoencoders for Text Generation",
    author = "Prokhorov, Victor  and
      Shareghi, Ehsan  and
      Li, Yingzhen  and
      Pilehvar, Mohammad Taher  and
      Collier, Nigel",
    editor = "Birch, Alexandra  and
      Finch, Andrew  and
      Hayashi, Hiroaki  and
      Konstas, Ioannis  and
      Luong, Thang  and
      Neubig, Graham  and
      Oda, Yusuke  and
      Sudoh, Katsuhito",
    booktitle = "Proceedings of the 3rd Workshop on Neural Generation and Translation",
    month = nov,
    year = "2019",
    address = "Hong Kong",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/D19-5612/",
    doi = "10.18653/v1/D19-5612",
    pages = "118--127",
    abstract = "Variational Autoencoders (VAEs) are known to suffer from learning uninformative latent representation of the input due to issues such as approximated posterior collapse, or entanglement of the latent space. We impose an explicit constraint on the Kullback-Leibler (KL) divergence term inside the VAE objective function. While the explicit constraint naturally avoids posterior collapse, we use it to further understand the significance of the KL term in controlling the information transmitted through the VAE channel. Within this framework, we explore different properties of the estimated posterior distribution, and highlight the trade-off between the amount of information encoded in a latent code during training, and the generative capacity of the model."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="prokhorov-etal-2019-importance">
    <titleInfo>
        <title>On the Importance of the Kullback-Leibler Divergence Term in Variational Autoencoders for Text Generation</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Victor</namePart>
        <namePart type="family">Prokhorov</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Ehsan</namePart>
        <namePart type="family">Shareghi</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Yingzhen</namePart>
        <namePart type="family">Li</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Mohammad</namePart>
        <namePart type="given">Taher</namePart>
        <namePart type="family">Pilehvar</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Nigel</namePart>
        <namePart type="family">Collier</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2019-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the 3rd Workshop on Neural Generation and Translation</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Alexandra</namePart>
            <namePart type="family">Birch</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Andrew</namePart>
            <namePart type="family">Finch</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Hiroaki</namePart>
            <namePart type="family">Hayashi</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Ioannis</namePart>
            <namePart type="family">Konstas</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Thang</namePart>
            <namePart type="family">Luong</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Graham</namePart>
            <namePart type="family">Neubig</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Yusuke</namePart>
            <namePart type="family">Oda</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Katsuhito</namePart>
            <namePart type="family">Sudoh</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Hong Kong</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Variational Autoencoders (VAEs) are known to suffer from learning uninformative latent representation of the input due to issues such as approximated posterior collapse, or entanglement of the latent space. We impose an explicit constraint on the Kullback-Leibler (KL) divergence term inside the VAE objective function. While the explicit constraint naturally avoids posterior collapse, we use it to further understand the significance of the KL term in controlling the information transmitted through the VAE channel. Within this framework, we explore different properties of the estimated posterior distribution, and highlight the trade-off between the amount of information encoded in a latent code during training, and the generative capacity of the model.</abstract>
    <identifier type="citekey">prokhorov-etal-2019-importance</identifier>
    <identifier type="doi">10.18653/v1/D19-5612</identifier>
    <location>
        <url>https://aclanthology.org/D19-5612/</url>
    </location>
    <part>
        <date>2019-11</date>
        <extent unit="page">
            <start>118</start>
            <end>127</end>
        </extent>
    </part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T On the Importance of the Kullback-Leibler Divergence Term in Variational Autoencoders for Text Generation
%A Prokhorov, Victor
%A Shareghi, Ehsan
%A Li, Yingzhen
%A Pilehvar, Mohammad Taher
%A Collier, Nigel
%Y Birch, Alexandra
%Y Finch, Andrew
%Y Hayashi, Hiroaki
%Y Konstas, Ioannis
%Y Luong, Thang
%Y Neubig, Graham
%Y Oda, Yusuke
%Y Sudoh, Katsuhito
%S Proceedings of the 3rd Workshop on Neural Generation and Translation
%D 2019
%8 November
%I Association for Computational Linguistics
%C Hong Kong
%F prokhorov-etal-2019-importance
%X Variational Autoencoders (VAEs) are known to suffer from learning uninformative latent representation of the input due to issues such as approximated posterior collapse, or entanglement of the latent space. We impose an explicit constraint on the Kullback-Leibler (KL) divergence term inside the VAE objective function. While the explicit constraint naturally avoids posterior collapse, we use it to further understand the significance of the KL term in controlling the information transmitted through the VAE channel. Within this framework, we explore different properties of the estimated posterior distribution, and highlight the trade-off between the amount of information encoded in a latent code during training, and the generative capacity of the model.
%R 10.18653/v1/D19-5612
%U https://aclanthology.org/D19-5612/
%U https://doi.org/10.18653/v1/D19-5612
%P 118-127
Markdown (Informal)
[On the Importance of the Kullback-Leibler Divergence Term in Variational Autoencoders for Text Generation](https://aclanthology.org/D19-5612/) (Prokhorov et al., NGT 2019)
ACL