\documentclass{article} \usepackage{microtype} \usepackage{booktabs} \usepackage{hyperref} \newcommand{\theHalgorithm}{\arabic{algorithm}} \usepackage[accepted]{icml2018} \usepackage{graphicx} \usepackage{amsmath} \usepackage{amssymb} \usepackage{verbatim} \usepackage{xcolor} \usepackage{tikz} \usepackage{url} \usepackage{float} \usepackage{subfig} \usepackage{xstring} \usepackage{tikz} \usetikzlibrary{automata,calc,backgrounds,arrows,positioning, shapes.misc,quotes,arrows.meta} \usepackage{color} \usepackage{pgfplots} \usepackage{mathtools} \usepackage{array} \usepackage{multirow} \newcommand{\vc}[1]{{\pmb{#1}}} \newcommand{\x}{{\pmb{x}}} \newcommand{\y}{{\pmb{y}}} \renewcommand{\t}{{\pmb{t}}} \newcommand{\z}{{\pmb{z}}} \newcommand{\Z}{\pmb{Z}} \newcommand{\h}{{\pmb{h}}} \renewcommand{\a}{{\pmb{a}}} \renewcommand{\b}{{\pmb{b}}} \renewcommand{\c}{{\pmb{c}}} \renewcommand{\d}{{\pmb{d}}} \newcommand{\g}{{\pmb{g}}} \newcommand{\q}{{\pmb{q}}} \newcommand{\W}{{\pmb{W}}} \newcommand{\btheta}{{\pmb{\theta}}} \newcommand{\bzeta}{{\pmb{\zeta}}} \newcommand{\bmu}{{\pmb{\mu}}} \newcommand{\bnu}{{\pmb{\nu}}} \newcommand{\bphi}{{\pmb{\phi}}} \newcommand{\arash}[1]{{\color{red}[[#1]]}} \newcommand{\amir}[1]{\textbf{{\color{violet}#1}}} \renewcommand{\L}{{\mathcal{L}}} \newcommand{\U}{{\mathcal{U}}} \newcommand{\E}{{\mathbb{E}}} \newcommand{\uu}{$\uparrow$} \newcommand{\dd}{$\downarrow$} \def\KL{\text{KL}} \def\H{\text{H}} \tikzset{ annotated cuboid/.pic={ \tikzset{every edge quotes/.append style={midway, auto}, /cuboid/.cd, #1 } \draw [-, every edge/.append style={pic actions, densely dashed, opacity=.5}, pic actions] (0,0,0) coordinate (o) -- ++(-\cubescale*\cubex,0,0) coordinate (a) -- ++(0,-\cubescale*\cubey,0) coordinate (b) edge coordinate [pos=1] (g) ++(0,0,-\cubescale*\cubez) -- ++(\cubescale*\cubex,0,0) coordinate (c) -- cycle (o) -- ++(0,0,-\cubescale*\cubez) coordinate (d) -- ++(0,-\cubescale*\cubey,0) coordinate (e) edge (g) -- (c) -- cycle (o) -- (a) -- ++(0,0,-\cubescale*\cubez) coordinate (f) edge (g) -- (d) -- cycle; }, /cuboid/.search also={/tikz}, /cuboid/.cd, width/.store in=\cubex, height/.store in=\cubey, depth/.store in=\cubez, units/.store in=\cubeunits, scale/.store in=\cubescale, width=10, height=10, depth=10, units=cm, scale=1., } \icmltitlerunning{DVAE++: Discrete Variational Autoencoders with Overlapping Transformations} \begin{document} \twocolumn[ \icmltitle{DVAE++: Discrete Variational Autoencoders with \\ Overlapping Transformations} \begin{icmlauthorlist} \icmlauthor{Arash Vahdat}{quad} \icmlauthor{William G. Macready}{quad} \icmlauthor{Zhengbing Bian}{quad} \icmlauthor{Amir Khoshaman}{quad} \icmlauthor{Evgeny Andriyash}{quad} \end{icmlauthorlist} \icmlaffiliation{quad}{Quadrant.ai, D-Wave Systems Inc., Burnaby, BC, Canada} \icmlcorrespondingauthor{Arash Vahdat}{arash@quadrant.ai} \icmlkeywords{Machine Learning, ICML, Generative Learning, Discrete Variational Autoencoders} \vskip 0.3in ] \printAffiliationsAndNotice{} \begin{abstract} Training of discrete latent variable models remains challenging because passing gradient information through discrete units is difficult. We propose a new class of smoothing transformations based on a mixture of two overlapping distributions, and show that the proposed transformation can be used for training binary latent models with either directed or undirected priors. We derive a new variational bound to efficiently train with Boltzmann machine priors. 
Using this bound, we develop DVAE++, a generative model with a global discrete prior and a hierarchy of convolutional continuous variables. Experiments on several benchmarks show that overlapping transformations outperform other recent continuous relaxations of discrete latent variables, including Gumbel-Softmax \cite{maddison2016concrete, jang2016categorical} and discrete variational autoencoders \cite{rolfe2016discrete}. \end{abstract} \vspace{-0.5cm} \section{Introduction}
Recent years have seen rapid progress in generative modeling made possible by advances in deep learning and stochastic variational inference. The reparameterization trick \cite{kingma2014vae, rezende2014stochastic} has made stochastic variational inference efficient by providing lower-variance gradient estimates. However, reparameterization, as originally proposed, does not easily extend to semi-supervised learning, binary latent attribute models, topic modeling, variational memory addressing, hard attention models, or clustering, which require discrete latent variables.
Continuous relaxations have been proposed for accommodating discrete variables in variational inference \cite{maddison2016concrete, jang2016categorical, rolfe2016discrete}. The Gumbel-Softmax technique \cite{maddison2016concrete, jang2016categorical} defines a temperature-based continuous distribution that in the zero-temperature limit converges to a discrete distribution. However, it is limited to categorical distributions and does not scale to multivariate models such as Boltzmann machines (BMs). The approach presented in \cite{rolfe2016discrete} can train models with BM priors but requires careful handling of the gradients during training.
We propose a new class of smoothing transformations for relaxing binary latent variables. The method relies on two distributions with overlapping support that in the zero-temperature limit converge to a Bernoulli distribution. We present two variants of smoothing transformations: one using a mixture of exponential distributions and one using a mixture of logistic distributions. We demonstrate that overlapping transformations can be used to train directed discrete latent models as in \cite{maddison2016concrete, jang2016categorical}, \textit{and} models with BMs in their prior as in \cite{rolfe2016discrete}. In the case of BM priors, we show that the Kullback-Leibler (KL) contribution to the variational bound can be approximated using an analytic expression that can be optimized using automatic differentiation without requiring the special treatment of gradients in \cite{rolfe2016discrete}.
Using this analytic bound, we develop a new variational autoencoder (VAE) architecture called DVAE++, which uses a BM prior to model discontinuous latent factors such as object categories or scene configuration in images. DVAE++ is inspired by \cite{rolfe2016discrete} and includes continuous local latent variables to model locally smooth features in the data. DVAE++ achieves results comparable to state-of-the-art techniques on several datasets and captures semantically meaningful discrete aspects of the data. We show that even when all continuous latent variables are removed, DVAE++ still attains near state-of-the-art generative likelihoods.
\subsection{Related Work} Training of models with discrete latent variables $\z$ requires low-variance estimates of gradients of the form $\nabla_\phi \mathbb{E}_{q_{\phi}(\z)}[f(\z)]$.
Only when $\z$ has a modest number of configurations (as in semi-supervised learning \cite{kingma2014semi} or semi-supervised generation \cite{maaloe2017semi}) can the gradient of the expectation be decomposed into a summation over configurations. The REINFORCE technique \cite{williams1992simple} is a more scalable method that moves the gradient inside the expectation: $\nabla_{\phi} \mathbb{E}_{q_\phi(\z)}[f(\z)] = \mathbb{E}_{q_{\phi}(\z)} [f(\z) \nabla_{\phi} \log{q_{\phi}(\z)}].$ Although the REINFORCE estimate is unbiased, it suffers from high variance, and carefully designed ``control variates'' are required to make it practical. Several works use this technique and differ in their choices of the control variates. NVIL \cite{mnih2014neural} uses a running average of the function, $f(\z)$, and an input-dependent \emph{baseline}. VIMCO \cite{mnih2016variational} is a multi-sample version of NVIL that has baselines tailored for each sample based on all the other samples. MuProp \cite{gu2015muprop} and DARN \cite{gregorICML14} are two other REINFORCE-based methods (with non-zero biases) that use a Taylor expansion of the function $f(\z)$ to create control variates.
To address the high variance of REINFORCE, other work strives to make discrete variables compatible with the reparameterization technique. A simple approach approximates the discrete variables by a continuous function during back-propagation. For instance, in the case of a Bernoulli distribution, the latent variables can be approximated by their mean value. This approach is called the \textit{straight-through (ST) estimator} \cite{bengio2013estimating}. Another way to make discrete variables compatible with reparameterization is to relax them into a continuous distribution. Concrete \cite{maddison2016concrete} and Gumbel-Softmax \cite{jang2016categorical} adopt this strategy by adding Gumbel noise to the logits of a softmax function with a temperature hyperparameter. A slope-annealed version of the ST estimator is proposed by \cite{chung2016hierarchical} and is equivalent to the Gumbel-Softmax approach for binary variables. REBAR \cite{tucker2017rebar} is a recent method that blends REINFORCE with Concrete to synthesize control variates. \cite{rolfe2016discrete} pairs discrete variables with auxiliary continuous variables and marginalizes out the discrete variables. Both overlapping transformations and Gumbel-based approaches offer smoothing through non-zero temperature; however, overlapping transformations offer additional freedom through the choice of the mixture distributions.
\section{Background} Let $\x$ represent observed random variables and $\z$ latent variables. The joint distribution over these variables is defined by the generative model $p(\x, \z) = p(\z) p(\x|\z)$, where $p(\z)$ is a prior distribution and $p(\x|\z)$ is a probabilistic decoder. Given a dataset $\pmb{X} = \{\x^{(1)}, \dots, \x^{(N)}\}$, the parameters of the model are trained by maximizing the log-likelihood: \begin{equation} \log p(\pmb{X}) = \sum_{i=1}^N \log p(\x^{(i)}). \nonumber \end{equation} Typically, computing $\log p(\x)$ requires an intractable marginalization over the latent variables $\z$. To address this problem, the VAE~\cite{kingma2014vae} introduces an inference model or probabilistic encoder $q(\z|\x)$ that infers latent variables for each observation.
In the VAE, instead of maximizing the marginal log-likelihood, a variational lower bound (ELBO) is maximized: \begin{equation} \label{eq:elbo} \log p(\x) \geq \E_{q(\z|\x)}\bigl[\log p(\x|\z)\bigr] - \KL\bigl(q(\z|\x) || p(\z)\bigr). \end{equation} The gradient of this objective is computed for the parameters of both the encoder and decoder using the reparameterization trick. With reparameterization, the expectation with respect to $q(\z|\x)$ in Eq.~\eqref{eq:elbo} is replaced with an expectation with respect to a known base distribution that is independent of the optimization parameters, together with a differentiable transformation from the base distribution to $q(\z|\x)$. This transformation may be a scale-shift transformation in the case of Gaussian base distributions, or may rely on the inverse cumulative distribution function (CDF) in the general case. Following the law of the unconscious statistician, the gradient is then estimated using samples from the base distribution.
Unfortunately, the reparameterization trick cannot be applied directly to discrete latent variables because there is no differentiable transformation that maps a base distribution to a discrete distribution. Current remedies address this difficulty using a continuous relaxation of the discrete latent variables \cite{maddison2016concrete, jang2016categorical}. The discrete variational autoencoder (DVAE) \cite{rolfe2016discrete} develops a different approach that applies the reparameterization trick to a marginal distribution constructed by pairing each discrete variable with an auxiliary continuous random variable.
For example, let $z \in \{0,1\}$ represent a binary random variable with the probability mass function $q(z|x)$. A smoothing transformation is defined using a spike-and-exponential transformation $r(\zeta | z)$, where $r(\zeta | z=0)=\delta(\zeta)$ is a Dirac $\delta$ distribution and $r(\zeta | z=1) \propto \exp(\beta \zeta)$ is an exponential distribution defined for $\zeta \in [0, 1]$ with an inverse temperature $\beta$ that controls the sharpness of the distribution. \cite{rolfe2016discrete} notes that the autoencoding term can be defined as \begin{equation} \sum_z q(z|x) \!\! \int \!d\zeta \; r(\zeta | z) \log p(x|\zeta) = \int \!\! d\zeta \; q(\zeta|x) \log p(x|\zeta), \nonumber \end{equation} where the marginal \begin{equation} q(\zeta|x) = \sum_{z} q(z|x) r(\zeta|z) \label{eq:qMarg} \end{equation} is a mixture of two continuous distributions. By factoring the inference model so that $x$ depends on $\zeta$ rather than $z$, the discrete variables can be explicitly eliminated from the ELBO and the reparameterization trick applied.
The smoothing transformations in \cite{rolfe2016discrete} are limited to spike-and-X transformations (e.g., spike-and-exp and spike-and-Gaussian), where $r(\zeta | z=0)$ is assumed to be a Dirac $\delta$ distribution. This property is required for computing the gradient of the KL term in the variational lower bound.
\section{Overlapping Transformations} \label{sec:transform} \begin{figure} \centering \subfloat[smoothing transformations]{\includegraphics[scale=0.25, trim={0.4cm 0 0.4cm 0},clip]{Fig1_1.pdf}} \subfloat[inverse CDF]{\includegraphics[scale=0.25, trim={0.4cm 0 0.4cm 0},clip]{Fig1_2.pdf}} \caption{a) Smoothing transformations using exponential distributions. b) Inverse CDF as a function of $q(z=1|x)$ for $\rho=0.5$ in comparison to the spike-and-exp smoothing \cite{rolfe2016discrete}.
The inverse CDF resulting from the mixture of exponential distributions approximates the step function that samples from the Bernoulli distribution. } \label{fig:exp_smooth} \end{figure}
A symmetric smoothing transformation of binary variables can also be defined using two exponential distributions: \begin{align*} r(\zeta | z = 0) = \frac{e^{- \beta \zeta}}{Z_\beta} \quad \text{and} \quad r(\zeta | z = 1) = \frac{e^{\beta (\zeta - 1)}}{Z_\beta}, \end{align*} for $\zeta\in [0,1]$, where $Z_\beta = (1 - e^{-\beta})/\beta$. These conditionals, visualized in Fig.~\ref{fig:exp_smooth}(a), define the mixture distribution $q(\zeta|x)$ of Eq.~\eqref{eq:qMarg}. The scalar $\beta$ acts as an inverse temperature as in the Gumbel-Softmax relaxation, and as $\beta \rightarrow \infty$, $q(\zeta|x)$ approaches $q(z=0|x)\delta(\zeta) + q(z=1|x)\delta(\zeta-1)$.
Applying the reparameterization trick to $q(\zeta|x)$ requires the inverse CDF of $q(\zeta|x)$. In Appendix~\ref{app:overlap} of the supplementary material, we show that the inverse CDF is \begin{equation} F^{-1}_{q(\zeta|x)}(\rho)= -\frac{1}{\beta} \log{\frac{-b + \sqrt{b^2 - 4c}}{2}}, \label{eq:inv_mix} \end{equation} where $q \equiv q(z=1|x)$, $b = [\rho + e^{-\beta}(q - \rho)]/(1 - q) -1$, and $c = -[q e^{-\beta}]/(1-q)$. Eq.~\eqref{eq:inv_mix} is a differentiable function that converts a sample $\rho$ from the uniform distribution $\mathcal{U}(0,1)$ to a sample from $q(\zeta|x)$. As shown in Fig.~\ref{fig:exp_smooth}(b), the inverse CDF approaches a step function as $\beta\rightarrow\infty$. However, to benefit from gradient information during training, $\beta$ is set to a finite value. Appendix~\ref{app:invCDFViz} provides further visualizations comparing overlapping transformations to Concrete smoothing~\cite{maddison2016concrete, jang2016categorical}.
The overlapping exponential distributions defined here can be generalized to any pair of smooth distributions converging to $\delta(\zeta)$ and $\delta(\zeta-1)$. In Appendix~\ref{app:otherOverlap}, we provide analogous results for logistic smoothing distributions. Next, we apply overlapping transformations to the training of generative models with discrete latent variables. We consider both directed and undirected latent variable priors.
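Before moving on, we note that Eq.~\eqref{eq:inv_mix} is straightforward to implement. The following is a minimal, illustrative NumPy sketch (not the code used in our experiments), with \texttt{q} denoting $q(z{=}1|x)$:
{\small
\begin{verbatim}
import numpy as np

def sample_zeta(q, beta, rho=None):
    """Reparameterized sample from the
    overlapping mixture q(zeta|x) via its
    inverse CDF."""
    if rho is None:
        rho = np.random.uniform(size=np.shape(q))
    eb = np.exp(-beta)
    b = (rho + eb * (q - rho)) / (1.0 - q) - 1.0
    c = -q * eb / (1.0 - q)
    # positive root of u^2 + b*u + c = 0
    u = (-b + np.sqrt(b * b - 4.0 * c)) / 2.0
    return -np.log(u) / beta  # zeta in [0, 1]
\end{verbatim}
}
Since every operation above is differentiable in \texttt{q} and \texttt{beta}, writing the same computation in an automatic-differentiation framework yields reparameterized gradients directly.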
\begin{figure} \centering \subfloat[]{ \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=1.5cm, thick, scale=0.7] \tikzstyle{every state}=[fill=white,draw=black,text=black, transform shape] \node[state] (z1) {$\z_1$}; \node[state] (z2) [below of=z1] {$\z_2$}; \node[state] (x) [below of=z2] {$\x$}; \path (z1) edge [bend left] (x); \path (z2) edge (x); \path (z1) edge (z2); \end{tikzpicture} }\hspace{-1mm}\subfloat[]{ \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=1.5cm, thick, scale=0.7] \tikzstyle{every state}=[fill=white,draw=black,text=black, transform shape] \node[state] (x) {$\x$}; \node[state] (z1) [below of=x] {$\z_1$}; \node[state] (z2) [below of=z1] {$\z_2$}; \path (x) edge (z1); \path (z1) edge (z2); \path (x) edge [bend left] (z2); \end{tikzpicture} }\hspace{-1mm}\subfloat[]{ \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=1.5cm, thick, scale=0.7] \tikzstyle{every state}=[fill=white,draw=black,text=black, transform shape] \node[state] (z1) {$\z_1$}; \node[state] (zeta1) [below of=z1] {$\bzeta_1$}; \node[state] (z2) [right= 0.12cm of z1] {$\z_2$}; \node[state] (zeta2) [below of=z2] {$\bzeta_2$}; \node[state] (x) [below of=zeta2] {$\x$}; \path (z1) edge (zeta1); \path (z2) edge (zeta2); \path (zeta1) edge (x); \path (zeta2) edge (x); \path (zeta1) edge (z2) ; \end{tikzpicture} }\hspace{-1mm}\subfloat[]{ \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=1.5cm, thick, scale=0.7] \tikzstyle{every state}=[fill=white,draw=black,text=black, transform shape] \node[state] (x) {$\x$}; \node[state] (z1) [below of=x] {$\z_1$}; \node[state] (zeta1) [below of=z1] {$\bzeta_1$}; \node[state] (z2) [right= 0.12cm of z1] {$\z_2$}; \node[state] (zeta2) [below of=z2] {$\bzeta_2$}; \path (x) edge (z1); \path (x) edge (z2); \path (z1) edge (zeta1); \path (zeta1) edge (z2) ; \path (z2) edge (zeta2); \end{tikzpicture} }\hspace{-1mm}\subfloat[]{ \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=1.5cm, thick, scale=0.7] \tikzstyle{every state}=[fill=white,draw=black,text=black, transform shape] \node[state] (z1) {$\bzeta_1$}; \node[state] (z2) [below of=z1] {$\bzeta_2$}; \node[state] (x) [below of=z2] {$\x$}; \path (z1) edge [bend left] (x); \path (z2) edge (x); \path (z1) edge (z2); \end{tikzpicture} }\hspace{-1mm}\subfloat[]{ \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=1.5cm, thick, scale=0.7] \tikzstyle{every state}=[fill=white,draw=black,text=black, transform shape] \node[state] (x) {$\x$}; \node[state] (z1) [below of=x] {$\bzeta_1$}; \node[state] (z2) [below of=z1] {$\bzeta_2$}; \path (x) edge (z1); \path (z1) edge (z2); \path (x) edge [bend left] (z2); \end{tikzpicture} }\hspace{-1mm}\subfloat[]{ \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=1.5cm, thick, scale=0.7] \tikzstyle{every state}=[fill=white,draw=black,text=black, transform shape] \node[state] (z1) {$\z_1$}; \node[state] (z1) {$\z_1$}; \node[state] (zeta1) [below of=z1] {$\bzeta_1$}; \node[state] (z2) [right= 0.15cm of z1] {$\z_2$}; \draw [dashed, rounded corners=.3cm] (-0.6,-0.6) rectangle (1.8,0.6); \node[state] (zeta2) [below of=z2] {$\bzeta_2$}; \node[state] (x) [below of=zeta2] {$\x$}; \path (z1) edge (zeta1); \path (z2) edge (zeta2); \path (zeta1) edge (x); \path (zeta2) edge (x); \path (z1) edge [-] (z2) ; \end{tikzpicture} } \caption{(a) A generative model with binary latent variables $\z_1$ and $\z_2$, and (b) the corresponding inference model. 
In (c) and (d), the continuous $\bzeta$ is introduced and dependencies on $\z$ are transferred to dependencies on $\bzeta$. In (e) and (f), the binary latent variables $\z$ are marginalized out. (g) A generative model with a Boltzmann machine (dashed) prior.} \label{fig:sbn} \end{figure}
\section{Directed Prior} The simplest discrete prior is factorial; however, with conditioning, we can build complex dependencies. To simplify presentation, we illustrate a VAE prior with one or two groups of conditioning variables, but note that the approach generalizes straightforwardly to many conditioning groups. Our approach parallels the method developed in \cite{rolfe2016discrete} for undirected graphical models.
Consider the generative model in Fig.~\ref{fig:sbn}(a) and its corresponding inference model in Fig.~\ref{fig:sbn}(b). To train this model using smoothing transformations, we introduce the continuous $\bzeta$ in Figs.~\ref{fig:sbn}(c) and \ref{fig:sbn}(d), in which dependencies on $\z$ are transferred to dependencies on $\bzeta$. In this way, binary latent variables influence other variables only through their continuous counterparts. In Figs.~\ref{fig:sbn}(e) and \ref{fig:sbn}(f), we show the same model but with $\z$ marginalized out. The joint $(\z,\bzeta)$ model of Figs.~\ref{fig:sbn}(c) and \ref{fig:sbn}(d) gives rise to a looser ELBO than the marginal $\bzeta$ model of Figs.~\ref{fig:sbn}(e) and \ref{fig:sbn}(f).
\subsection{Joint ELBO} Assuming that $p(\z_1)$, $p(\z_2|\bzeta_1)$, $q(\z_1|\x)$, $q(\z_2|\x, \bzeta_1)$, $r(\bzeta_1|\z_1)$, and $r(\bzeta_2|\z_2)$ are factorial in both the inference and generative models, $q(\bzeta_1|\x)$ and $q(\bzeta_2|\bzeta_1,\x)$ are also factorial, with $q(\bzeta_{1}|\x) = \prod_i q(\zeta_{1,i}|\x)$ where $q(\zeta_{1,i}|\x)= \sum_{z_{1,i}} r(\zeta_{1,i}|z_{1,i}) q(z_{1,i}|\x)$, and $q(\bzeta_2|\bzeta_1,\x) = \prod_i q(\zeta_{2,i}|\bzeta_1,\x)$ where $q(\zeta_{2,i}|\bzeta_1,\x) = \sum_{z_{2,i}} r(\zeta_{2,i}|z_{2,i}) q(z_{2,i}|\bzeta_1,\x)$. In this case, the ELBO for the model in Figs.~\ref{fig:sbn}(c) and \ref{fig:sbn}(d) is \begin{flalign} &\E_{q(\bzeta_1|\x)}\left[ \E_{q(\bzeta_2|\bzeta_1,\x)} \left[\log p(\x|\bzeta_1, \bzeta_2) \right]\right] - \KL(q(\z_1|\x) || p(\z_1)) \nonumber \\ & \quad - \E_{q(\bzeta_1|\x)} \left[ \KL(q(\z_2|\x, \bzeta_1) || p(\z_2|\bzeta_1)) \right]. \label{eq:elbo_sbn} \end{flalign} The KL terms corresponding to the divergence between factorial Bernoulli distributions have a closed form. The expectations over $\bzeta_1$ and $\bzeta_2$ are reparameterized using the technique presented in Sec.~\ref{sec:transform}.
\subsection{Marginal ELBO} \label{sec:cont_relax} The ELBO for the marginal graphical model of Figs.~\ref{fig:sbn}(e) and \ref{fig:sbn}(f) is \begin{flalign} &\E_{q(\bzeta_1|\x)}\left[ \E_{q(\bzeta_2|\x, \bzeta_1)} \left[\log p(\x|\bzeta_1, \bzeta_2) \right]\right] - \KL(q(\bzeta_1|\x) || p(\bzeta_1)) \nonumber \\ & \quad - \E_{q(\bzeta_1|\x)} \left[ \KL(q(\bzeta_2|\x, \bzeta_1) || p(\bzeta_2|\bzeta_1)) \right], \label{eq:elbo_sbn2} \end{flalign} with $p(\bzeta_1) = \prod_i p(\zeta_{1, i})$ where $p(\zeta_{1, i}) = \sum_{z_{1,i}} r(\zeta_{1,i}|z_{1,i}) p(z_{1,i})$ and $p(\bzeta_2|\bzeta_1)= \prod_i p(\zeta_{2,i}|\bzeta_1)$ where $p(\zeta_{2,i}|\bzeta_1) = \sum_{z_{2,i}} r(\zeta_{2,i}|z_{2,i}) p(z_{2,i}|\bzeta_1)$. The KL terms no longer have a closed form but can be estimated with Monte Carlo sampling.
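For example, the first KL term in Eq.~\eqref{eq:elbo_sbn2} can be estimated per latent unit by averaging $\log q(\zeta_{1,i}|\x) - \log p(\zeta_{1,i})$ over reparameterized samples. The sketch below is illustrative only; it reuses \texttt{sample\_zeta} from above, and \texttt{q\_post} and \texttt{p\_prior} denote the posterior and prior means of a single binary unit:
{\small
\begin{verbatim}
def log_q_zeta(zeta, q, beta):
    """Log-density of the overlapping mixture
    (1-q)*r(zeta|0) + q*r(zeta|1)."""
    log_norm = (np.log(beta)
                - np.log1p(-np.exp(-beta)))
    r0 = np.exp(-beta * zeta)
    r1 = np.exp(beta * (zeta - 1.0))
    mix = (1 - q) * r0 + q * r1
    return log_norm + np.log(mix)

def mc_kl(q_post, p_prior, beta, n=10000):
    """Monte Carlo estimate of
    KL(q(zeta|x) || p(zeta)) for one unit."""
    zeta = sample_zeta(np.full(n, q_post), beta)
    lq = log_q_zeta(zeta, q_post, beta)
    lp = log_q_zeta(zeta, p_prior, beta)
    return np.mean(lq - lp)
\end{verbatim}
}
The same estimator applies to the conditional KL term, with the conditional prior mean $p(z_{2,i}{=}1|\bzeta_1)$ used in place of \texttt{p\_prior} for each sampled $\bzeta_1$.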
In Appendix~\ref{app:elbos}, we show that Eq.~\eqref{eq:elbo_sbn2} provides a tighter bound on $\log p(\x)$ than does Eq.~\eqref{eq:elbo_sbn}.
\section{Boltzmann Machine Prior} \cite{rolfe2016discrete} defined an expressive prior over binary latent variables by using a Boltzmann machine. We build upon that work and present a simpler objective that can still be trained with a low-variance gradient estimate. To simplify notation, we assume that the prior distribution over the latent binary variables is a restricted Boltzmann machine (RBM), but these results can be extended to general BMs. An RBM defines a probability distribution over binary random variables arranged on a bipartite graph as $p(\z_1, \z_2) = e^{-E(\z_1,\z_2)}/Z$ where $E(\z_1,\z_2) = -\a_1^T \z_1 - \a_2^T\z_2 - \z_1^T \W \z_2$ is an energy function with linear biases $\a_1$ and $\a_2$, and pairwise interactions $\W$. $Z$ is the partition function.
Fig.~\ref{fig:sbn}(g) visualizes a generative model with a BM prior. As in Figs.~\ref{fig:sbn}(c) and \ref{fig:sbn}(d), conditionals are formed on the auxiliary variables $\bzeta$ instead of the binary variables $\z$. The inference model in this case is identical to the model in Fig.~\ref{fig:sbn}(d) and it infers both $\z$ and $\bzeta$ in a hierarchical structure. The autoencoding contribution to the ELBO with an RBM prior is again the first term in Eq.~\eqref{eq:elbo_sbn} since both models share the same inference model structure. However, computing the KL term with the RBM prior is more challenging. Here, we introduce a novel formulation of the KL term. Our derivation can be used for training discrete variational autoencoders with a BM prior without any manual coding of gradients.
We use $\E_{q(\z, \bzeta|\x)}[f] = \E_{q(\bzeta|\x)}\bigl[\E_{q(\z|\x, \bzeta)}[f]\bigr]$ to compute the KL contribution to the ELBO: {\small \begin{align} &\KL\bigl(q(\z_1, \z_2, \bzeta_1, \bzeta_2 | \x) \| p(\z_1, \z_2, \bzeta_1, \bzeta_2)\bigr) = \nonumber \\ & \ \log Z -\H\bigl(q(\z_1|\x)\bigr) - \E_{q(\bzeta_1|\x)} \left[\H\bigl(q(\z_2|\x, \bzeta_1)\bigr)\right] \label{eq:kl_simple} \\ & +\E_{q(\bzeta_1|\x)}\!\big[\E_{q(\bzeta_2|\x, \bzeta_1)}\!\big[\underbrace{\E_{q(\z_1|\x, \bzeta_1)}\!\big[ \E_{q(\z_2|\x, \bzeta_1, \bzeta_2)}\!\big[ E(\z_1, \z_2) \big]\big]}_{\text{cross-entropy}}\big]\big]. \nonumber \end{align}} Here, $\H(q)$ is the entropy of the distribution $q$, which has a closed form when $q$ is factorial Bernoulli. The conditionals $q(\z_1|\x, \bzeta_1)$ and $q(\z_2|\x, \bzeta_1, \bzeta_2)$ are both factorial distributions that have analytic expressions. Denoting \begin{align*} \mu_{1,i}(\x) &\equiv q(z_{1,i}=1|\x), \\ \nu_{1,i}(\x, \bzeta_1) &\equiv q(z_{1,i}=1|\x, \bzeta_1), \\ \mu_{2,i}(\x, \bzeta_1) &\equiv q(z_{2,i}=1|\x, \bzeta_1), \\ \nu_{2,i}(\x, \bzeta_1, \bzeta_2) &\equiv q(z_{2,i}=1|\x, \bzeta_1, \bzeta_2), \end{align*} it is straightforward to show that \begin{align*} \footnotesize \nu_{1, i}(\x, \bzeta_1) &= \frac{q(z_{1,i}=1|\x) r(\zeta_{1, i}|z_{1, i}=1)}{\sum_{z_{1,i}}q(z_{1,i}|\x) r(\zeta_{1, i}|z_{1, i})} \\ &= \sigma\Bigl(g(\mu_{1,i}(\x)) + \log\bigl[\frac{r(\zeta_{1, i}|z=1)}{r(\zeta_{1, i}|z=0)}\bigr]\Bigr), \end{align*} where $\sigma(x)=1/(1+e^{-x})$ is the logistic function, and $g(\mu) \equiv \log \bigl[\mu/\bigl(1 - \mu\bigr)\bigr]$ is the logit function. A similar expression holds for $\bnu_2(\x, \bzeta_1, \bzeta_2)$.
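For the exponential smoothing of Sec.~\ref{sec:transform}, the log-ratio reduces to $\log[r(\zeta|z{=}1)/r(\zeta|z{=}0)] = \beta(2\zeta - 1)$, so $\nu$ is an explicit differentiable function of $\mu$ and $\zeta$. A minimal sketch in the same NumPy notation as above (illustrative only):
{\small
\begin{verbatim}
def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def logit(mu):
    return np.log(mu) - np.log1p(-mu)

def nu(mu, zeta, beta):
    """q(z=1|x,zeta) under exponential
    smoothing."""
    llr = beta * (2.0 * zeta - 1.0)
    return sigmoid(logit(mu) + llr)
\end{verbatim}
}
Because $\nu$ is built from differentiable operations only, the cross-entropy term discussed next can be handled by standard automatic differentiation.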
The expectation marked as cross-entropy in Eq.~\eqref{eq:kl_simple} corresponds to the cross-entropy between a factorial distribution and an unnormalized Boltzmann machine, which is \begin{equation*} - \a_1^T \bnu_1(\x, \bzeta_1) - \a_2^T \bnu_2(\x, \bzeta_1, \bzeta_2) - \bnu_1(\x, \bzeta_1)^T \W \bnu_2(\x, \bzeta_1, \bzeta_2). \end{equation*} Finally, we use the equalities $\E_{q(\bzeta_1|\x)}[\bnu_1(\x, \bzeta_1)] = \bmu_1(\x)$ and $\E_{q(\bzeta_2|\x, \bzeta_1)}[\bnu_2(\x, \bzeta_1, \bzeta_2)] = \bmu_2(\x, \bzeta_1)$ to simplify the cross-entropy term, which gives the KL as \begin{align} &\KL\bigl(q(\z_1, \z_2, \bzeta_1, \bzeta_2 | \x) \| p(\z_1, \z_2, \bzeta_1, \bzeta_2)\bigr) = \log Z \nonumber \\ & -\H\bigl(q(\z_1|\x)\bigr) - \E_{q(\bzeta_1|\x)} \left[\H\bigl(q(\z_2|\x, \bzeta_1)\bigr)\right] \nonumber \\ &- \a_1^T \bmu_1(\x) - \E_{q(\bzeta_1|\x)}\left[ \a_2^T \bmu_2(\x, \bzeta_1) \right] \nonumber \\ &-\E_{q(\bzeta_1|\x)} \left[ \bnu_1(\x, \bzeta_1)^T \W \bmu_2(\x, \bzeta_1) \right]. \nonumber \end{align}
All terms contributing to the KL other than $\log Z$ can be computed analytically given samples from the hierarchical encoder. Expectations with respect to $q(\bzeta_1|\x)$ are reparameterized using the inverse CDF. Any automatic differentiation (AD) library can then back-propagate gradients through the network. Only $\log Z$ requires special treatment. In Appendix~\ref{app:gradZ}, we show how this term can also be included in the objective function so that its gradient is computed automatically. The ability of AD to calculate gradients stands in contrast to \cite{rolfe2016discrete}, where gradients must be manually coded. This pleasing property follows from $r(\zeta|z)$ having the same support for both $z=0$ and $z=1$, so that $q(z|x, \zeta)$ is a proper, non-degenerate distribution; neither holds for the spike-and-X transformations of \cite{rolfe2016discrete}.
\section{DVAE++} In previous sections, we have illustrated with simple examples how overlapping transformations can be used to train discrete latent variable models with either directed or undirected priors. Here, we develop a network architecture (DVAE++) that improves upon convolutional VAEs for generative image modeling. DVAE++ features both global discrete latent variables (to capture global properties such as scene or object type) and local continuous latent variables (to capture local properties such as object pose, orientation, or style). Both generative and inference networks rely on an autoregressive structure defined over groups of latent and observed variables. As we are modeling images, conditional dependencies between groups of variables are captured with convolutional neural networks. DVAE++ is similar to the convolutional VAEs used in \cite{kingma2016improved, chen2016variational}, but does not use normalizing flows.
\subsection{Graphical Model} The DVAE++ graphical model is visualized in Fig.~\ref{fig:pgm}. Global and local variables are indicated by $\z$ and $\h$, respectively. Subscripts indicate different groups of random variables. The conditional distribution of each group is factorial, except for $\z_1$ and $\z_2$ in the prior, whose joint distribution is modeled with an RBM. Global latent variables are represented with boxes, and local variables are represented with 3D volumes because they are convolutional. Groups of local continuous variables are factorial (independent). This assumption limits the ability of the model to capture correlations at different spatial locations and different depths.
While the autoregressive structure mitigates this defect, we rely mainly on the discrete global latent variables to capture long-range dependencies. The discrete nature of the global RBM prior allows DVAE++ to capture richly correlated discontinuous hidden factors that influence data generation. Fig.~\ref{fig:pgm}(a) defines the generative model as
\begin{align*} p(\z, \bzeta, \h, \x) =& \, p(\z) \prod_i r(\zeta_{1,i}|z_{1,i}) r(\zeta_{2,i}|z_{2,i}) \times \\ & \prod_j p(\h_j | \h_{<j}, \bzeta) \, p(\x | \bzeta, \h), \end{align*}
where $\h_{<j}$ denotes the groups of local variables that precede $\h_j$ in the autoregressive ordering.
\begin{figure*} \centering \subfloat[generative model]{ \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=1.5cm, thick, scale=0.75] \tikzstyle{every state}=[fill=white,draw=black,text=black, transform shape]
\pic [fill=white, draw=black] at (0,0) {annotated cuboid={width=0.5, height=1.5, depth=1.5}}; \pic [fill=white, draw=black] at (2,0) {annotated cuboid={width=0.5, height=1.5, depth=1.5}}; \pic [fill=white, draw=black] at (4,0) {annotated cuboid={width=0.5, height=1.5, depth=1.5}};
\node[] at (-0.25,-0.8) {$\h_1$}; \node[] at (1.75,-0.8) {$\h_2$}; \node[] at (3.75,-0.8) {$\h_3$};
\coordinate (a) at (0, -1.0, -1); \coordinate (b) at (1.0, -1.0, -1); \coordinate (c) at (2.0, -1.0, -1); \coordinate (d) at (3.0, -1.0, -1);
\path[->, thick] (a) edge (b); \path[->, thick] (c) edge (d); \path[->, thick] (a) edge [bend right=50] (d);
\node at (6.0,-2.5) [shape=rectangle,draw, minimum size=0.7cm] (x) {$\x$};
\coordinate (a1) at (-0.25, -1.8, 0.); \coordinate (b1) at (1.75, -1.8, 0.); \coordinate (c1) at (3.75, -1.8, 0.);
\draw (a1) to [out=-90,in=180] (2.0, -2.5) to (x) ; \draw (b1) to [out=-90,in=180] (3.0, -2.5) to [out=0,in=180] (x) ; \draw (c1) to [out=-90,in=180] (5.0, -2.5) to [out=0,in=180] (x) ;
\node at (1.0, 3.3) [shape=rectangle,draw, minimum size=0.6cm] (z01) {$\ \quad \z_{1}\quad \ $}; \node at (1.0, 2) [shape=rectangle,draw, minimum size=0.6cm] (zeta01) {$\ \quad \bzeta_{1}\quad \ $}; \node at (4.0, 3.3) [shape=rectangle,draw, minimum size=0.6cm] (z02) {$\ \quad \z_{2}\quad \ $}; \node at (4.0, 2) [shape=rectangle,draw, minimum size=0.6cm] (zeta02) {$\ \quad \bzeta_{2}\quad \ $};
\draw [dashed, rounded corners=.2cm] (-0.1,2.8) rectangle (5.1,3.8); \path[-, thick] (z01) edge [] (z02);
\coordinate (a2) at (-0.35, 0.2, -1.5); \coordinate (a3) at (-0.25, 0.2, -1.5); \coordinate (b2) at (1.75, 0.2, -1.5); \coordinate (c2) at (3.65, 0.2, -1.5);
\path[->, thick] (zeta01) edge [] (a2); \path[->, thick] (zeta01) edge [] (b2); \path[->, thick] (zeta01) edge [] (c2); \path[->, thick] (zeta02) edge [] (a3); \path[->, thick] (zeta02) edge [] (b2); \path[->, thick] (zeta02) edge [] (c2); \path[->, thick] (z01) edge [] (zeta01); \path[->, thick] (z02) edge [] (zeta02);
\draw (zeta01) to [out=180,in=180] (-0.0, -2.5) to [out=0,in=180] (x) ; \draw (zeta02) to [out=0,in=90] (6.0, 1.0) to [out=-90,in=90] (x) ;
\end{tikzpicture} } \hspace{1cm} \subfloat[inference model]{ \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=1.5cm, thick, scale=0.75] \tikzstyle{every state}=[fill=white,draw=black,text=black, transform shape]
\pic [fill=white, draw=black] at (0,0) {annotated cuboid={width=0.5, height=1.5, depth=1.5}}; \pic [fill=white, draw=black] at (2,0) {annotated cuboid={width=0.5, height=1.5, depth=1.5}}; \pic [fill=white, draw=black] at (4,0) {annotated cuboid={width=0.5, height=1.5, depth=1.5}};
\node[] at (-0.25,-0.8) {$\h_1$}; \node[] at (1.75,-0.8) {$\h_2$}; \node[] at (3.75,-0.8) {$\h_3$};
\coordinate (a) at (0, -1.0, -1); \coordinate (b) at (1.0, -1.0, -1); \coordinate (c) at (2.0, -1.0, -1); \coordinate (d) at (3.0, -1.0, -1);
\path[->, thick] (a) edge (b);
\path[->, thick] (c) edge (d); \path[->, thick] (a) edge [bend right=50] (d); \node at (-2.0,-2.5) [shape=rectangle,draw, minimum size=0.7cm] (x) {$\x$}; \coordinate (a1) at (-0.25, -1.7, 0.); \coordinate (b1) at (1.75, -1.7, 0.); \coordinate (c1) at (3.75, -1.7, 0.); \draw (x) to (-1.0, -2.5) to [out=0,in=-110] (a1) ; \draw (x) to ( 1.0, -2.5) to [out=0,in=-110] (b1) ; \draw (x) to ( 3.0, -2.5) to [out=0,in=-110] (c1) ; \node at (0.25, 2) [shape=rectangle,draw, minimum size=0.6cm] (zeta01) {$\ \quad \bzeta_{1}\quad \ $}; \node at (3.25, 2) [shape=rectangle,draw, minimum size=0.6cm] (zeta02) {$\ \quad \bzeta_{2}\quad \ $}; \node at (0.25, 3.3) [shape=rectangle,draw, minimum size=0.6cm] (z01) {$\ \quad \z_{1}\quad \ $}; \node at (3.25, 3.3) [shape=rectangle,draw, minimum size=0.6cm] (z02) {$\ \quad \z_{2}\quad \ $}; \path[->, thick] (z01) edge [] (zeta01); \path[->, thick] (z02) edge [] (zeta02); \draw (zeta01) to [out=10,in=190] (z02) ; \coordinate (a2) at (-0.25, 0.2, -1.5); \coordinate (b2) at (1.75, 0.2, -1.5); \coordinate (c2) at (3.65, 0.2, -1.5); \coordinate (c3) at (3.85, 0.2, -1.5); \path[->, thick] (zeta01) edge [] (a2); \path[->, thick] (zeta01) edge [] (b2); \path[->, thick] (zeta01) edge [] (c2); \path[->, thick] (zeta02) edge [] (a2); \path[->, thick] (zeta02) edge [] (b2); \path[->, thick] (zeta02) edge [] (c3); \draw (x) to (-2.0, 2.0) to [out=90,in=-180] (z01) ; \draw (x) to (-2.0, 2.0) to [out=90,in=-215] (z02) ; \end{tikzpicture} } \caption{a) In the generative model, binary global latent variables $\z_1$ and $\z_2$ are modeled by an RBM (dashed) and a series of local continuous variables are generated in an autoregressive structure using residual networks. b) After forming distributions over the global variables, the inference model defines the conditional on the local latent variables similarly using residual networks.} \label{fig:pgm} \end{figure*} The inference model of Fig.~\ref{fig:pgm}(b) conditions over latent variables in a similar order as the generative model: \begin{align*} q(\z, \bzeta, \h | \x) =& \ q(\z_1 | \x) \prod_i r(\zeta_{1,i}|z_{1,i}) \times \\ & \ q(\z_2|\x, \bzeta_1) \prod_k r(\zeta_{2,k}|z_{2,k}) \prod_j q(\h_j | \bzeta, \h_{