diff --git "a/papers/2402/2402.03985.tex" "b/papers/2402/2402.03985.tex" new file mode 100644--- /dev/null +++ "b/papers/2402/2402.03985.tex" @@ -0,0 +1,1919 @@ +\documentclass{article} + +\parindent 0pt +\topsep 4pt plus 1pt minus 2pt +\partopsep 1pt plus 0.5pt minus 0.5pt +\itemsep 2pt plus 1pt minus 0.5pt +\parsep 2pt plus 1pt minus 0.5pt +\parskip 6pt +\evensidemargin -0.23in +\oddsidemargin -0.23in +\setlength\textheight{9.0in} +\setlength\textwidth{6.75in} +\setlength\columnsep{0.25in} +\setlength\headheight{10pt} +\setlength\headsep{10pt} +\addtolength{\topmargin}{-20pt} +\addtolength{\topmargin}{-0.29in} +\usepackage{authblk} + +\usepackage[utf8]{inputenc} \usepackage[T1]{fontenc} + +\usepackage{microtype} +\usepackage{graphicx} +\usepackage{booktabs} \usepackage{xcolor} \usepackage{url} \usepackage{amsfonts} \usepackage{nicefrac} \usepackage{subcaption} +\captionsetup[figure]{font=small} +\usepackage{placeins} + +\usepackage{hyperref} + + +\usepackage[round]{natbib} +\renewcommand{\bibname}{References} + +\usepackage{multirow} +\usepackage{tikz} +\usetikzlibrary{shapes,arrows.meta,positioning,backgrounds,fit, graphs} + +\usepackage{amsmath} +\usepackage{amssymb} +\usepackage{amsthm} +\usepackage{thm-restate} + +\declaretheorem[name=Theorem, numberwithin=section]{theorem} +\declaretheorem[name=Lemma, sibling=theorem]{lemma} +\declaretheorem[name=Definition, sibling=theorem]{definition} +\declaretheorem[name=Corollary, sibling=theorem]{corollary} +\declaretheorem[name=Assumption, sibling=theorem]{assumption} +\declaretheorem[name=Condition, sibling=theorem]{condition} +\declaretheorem[name=Conjecture, sibling=theorem]{conjecture} + +\newcommand{\todo}[1]{} + +\newcommand{\calx}{\mathcal{X}} +\newcommand{\cals}{\mathcal{S}} +\newcommand{\caln}{\mathcal{N}} +\newcommand{\calm}{\mathcal{M}} +\newcommand{\CP}{\mathcal{E}} +\newcommand{\VP}{\mathcal{V}} +\newcommand{\calxd}{\calx^d} +\newcommand{\R}{\mathbb{R}} +\newcommand{\E}{\operatorname*{\mathbb{E}}} +\newcommand{\I}{\mathbb{I}} +\DeclareMathOperator*{\Var}{Var} +\DeclareMathOperator*{\Cov}{Cov} +\DeclareMathOperator*{\argmin}{argmin} +\DeclareMathOperator*{\argmax}{argmax} +\newcommand{\dx}{\,\mathrm{d}} +\newcommand{\sdp}{\tilde{s}} +\newcommand{\xsyn}{X^{Syn}} +\newcommand{\xreal}{X} +\newcommand{\xpred}{X^*} +\newcommand{\obs}{Z} +\newcommand{\sigmadp}{\sigma_{DP}} +\newcommand{\med}{\mathrm{MED}} +\newcommand{\medt}{\med_\theta} +\newcommand{\KL}[2]{\mathrm{KL}(#1 \, || \, #2)} +\newcommand{\epsilondelta}{(\epsilon, \delta)} +\newcommand{\thetahat}{\hat{\theta}} +\newcommand{\convprob}{\xrightarrow{P}} +\newcommand{\convdist}{\xrightarrow{d}} +\DeclareMathOperator{\TV}{TV} +\newcommand{\invchisq}{\text{Inv-}\chi^2} +\newcommand{\SDGP}{\mathrm{SDGP}} +\newcommand{\dscat}{D_s^{\mathrm{Cat}}} + +\renewcommand{\topfraction}{1.0} +\renewcommand{\bottomfraction}{1.0} + +\title{A Bias--Variance Decomposition for Ensembles over Multiple Synthetic Datasets} +\author[1]{Ossi Räisä} +\author[1]{Antti Honkela} +\affil[1]{Department of Computer Science, University of Helsinki} +\date{} + +\begin{document} + +\twocolumn[ +\maketitle +] + +\begin{abstract} +Recent studies have highlighted the benefits of generating multiple synthetic +datasets for supervised learning, from increased accuracy to more effective model selection and +uncertainty estimation. These benefits have clear empirical +support, but the theoretical understanding of them is currently very light. 
+We seek to increase the theoretical understanding by deriving bias-variance +decompositions for several settings of using multiple synthetic datasets. Our theory +predicts multiple synthetic datasets to be especially beneficial for high-variance +downstream predictors, and yields a simple rule of thumb to select the appropriate +number of synthetic datasets in the case of mean-squared error and Brier score. +We investigate how our theory works in practice by evaluating the performance +of an ensemble over many synthetic datasets for several real datasets and +downstream predictors. The results follow our theory, showing that our +insights are also practically relevant. +\end{abstract} + +\section{Introduction}\label{sec:introduction} +Synthetic data has recently attracted significant attention for several application in machine learning. +The idea is to generate a dataset that preserves the population-level attributes of the +real data, making the synthetic data useful for analysis, while also accomplishing a secondary task, +such as improving model evaluation~\citep{vanbreugelCanYouRely2023}, +fairness~\citep{vanbreugelDECAFGeneratingFair2021}, data +augmentation~\citep{antoniouDataAugmentationGenerative2018,dasConditionalSyntheticData2022} or +privacy~\citep{liewDataDistortionProbability1985, +rubin1993statistical}. +For the last application, \emph{differential privacy} (DP)~\citep{dworkCalibratingNoiseSensitivity2006} +is often combined with synthetic data +generation~\citep{hardtSimplePracticalAlgorithm2012,mckennaWinningNISTContest2021} to achieve +provable privacy protection, since +releasing synthetic data without DP can be vulnerable to membership inference +attacks~\citep{vanbreugelMembershipInferenceAttacks2023,meeusAchillesHeelsVulnerable2023}. + +Several lines of work have considered generating multiple synthetic datasets from one real +dataset for various purposes, including statistical +inference~\citep{rubin1993statistical,raghunathanMultipleImputationStatistical2003,raisaConsistentBayesianInference2023} +and supervised learning~\citep{vanbreugelSyntheticDataReal2023}. We focus on the latter setting. + +In supervised learning, \citet{vanbreugelSyntheticDataReal2023} +proposed using multiple synthetic datasets in the form of an +ensemble. They propose training a predictive model separately on each synthetic dataset, and +ensembling these models by averaging their predictions, which they call a +\emph{deep generative ensemble} (DGE). We drop the word ``deep'' in this paper and use +the term \emph{generative ensemble}, +as we do not require the generator to be deep in any sense. +The DGE was empirically demonstrated to be beneficial in several ways by +\citet{vanbreugelSyntheticDataReal2023}, including +predictive accuracy, model evaluation, model selection, and uncertainty estimation. +Followup work has applied DGEs to improve model evaluation under distribution shifts +and for small subgroups~\citep{vanbreugelCanYouRely2023}. + +However, \citet{vanbreugelSyntheticDataReal2023} have very little theoretical analysis of +the generative ensemble. They justify the ensemble by assuming the synthetic data is generated +from a posterior predictive distribution. This assumption can be justified heuristically for +deep generative models through a Bayesian interpretation of deep +ensembles~\citep{ +lakshminarayananSimpleScalablePredictive2017, +wilsonBayesianDeepLearning2020, +wilsonDeepEnsemblesApproximate2021}. 
+However this justification only applies to generators with highly multi-modal losses, like neural networks. +It also does not provide any insight on how different choices in the setup, like the +choice of downstream predictor, affect the performance of the ensemble. + +\looseness=-1 +The bias-variance decomposition~\citep{gemanNeuralNetworksBias1992} and its +generalisations~\citep{uedaGeneralizationErrorEnsemble1996,woodUnifiedTheoryDiversity2023a} +are classical tools that provide insight into supervised learning problems. +The standard bias-variance decomposition from \citet{gemanNeuralNetworksBias1992} +considers predicting $y\in \R$ given features $x\in \calx$, using predictor +$g(x; D)$ that receives training data $D$. +The mean-squared error (MSE) of $g$ can be decomposed into bias, variance, +and noise terms: +\begin{equation} + \underbrace{\E_{D,y}[(y - g)^2]}_{\mathrm{MSE}} + = \underbrace{(f(x) - \E_D[g])^2}_{\mathrm{Bias}} + + \underbrace{\Var_D[g]}_{\mathrm{Variance}} + + \underbrace{\Var_{y}[y]}_{\mathrm{Noise}} +\end{equation} +where we have shortened $g = g(x; D)$ and $f(x) = \E_{y}[y]$. $x$ is considered fixed, so all the random quantities in +the decomposition are implicitly conditioned on $x$. +While MSE is typically only used with regression +problems, the decomposition also applies to the Brier score~\citep{brier1950verification} +in classification, which is simply the MSE of class probabilities. + +We seek to provide deeper theoretical understanding of generative ensembles through bias-variance +decompositions, which provide a more fine-grained view of how different choices in the setup +affect the end result. +\paragraph{Contributions} +\begin{enumerate} + \item We derive a bias-variance decomposition for the MSE or + Brier score of + generative ensembles in Theorem~\ref{thm:mse-synthetic-data-decomposition}. + This decomposition is simply a sum of interpretable terms, which makes it possible to predict + how various choices affect the error. In particular, the number of synthetic datasets only + affects the variance-related terms in the decomposition, predicting that generative ensembles + work best with high-variance and low bias models, like interpolating decision trees. + \item We derive a simple rule of thumb from our decomposition + to select the number of synthetic datasets in Section~\ref{sec:estimating-effect-multiple-syn-datasets}. + In summary, 2 synthetic datasets give 50\% of the potential benefit from multiple synthetic + datasets, 10 give 90\% of the benefit and 100 give 99\% of the benefit. + This also applies to bagging ensembles like random forests, which is likely to be of + independent interest. + \item We generalise the decomposition of Theorem~\ref{thm:mse-synthetic-data-decomposition} + to differentially private (DP) generation algorithms that do not split their privacy budget + between the multiple synthetic datasets\footnote{ + Theorem~\ref{thm:mse-synthetic-data-decomposition} applies to DP algorithms that + split the privacy budget, but it is not clear if multiple synthetic datasets are + beneficial with these algorithms, as splitting the privacy budget between more synthetic + datasets means that each one requires adding more noise, degrading the quality of + the synthetic data. + } in Theorem~\ref{thm:mse-dp-synthetic-data-decomposition}. + \item We evaluate the performance of a generative ensemble on several + datasets, downstream prediction algorithms, and error metrics in Section~\ref{sec:experiments}. 
The
+ results show that our theory applies in practice:
+ multiple synthetic datasets generally decrease all of the error metrics, especially for
+ high-variance models where the theory predicts multiple synthetic datasets to have the
+ highest impact. In addition, our rule of thumb makes accurate predictions when the
+ error can be accurately estimated.
+\end{enumerate}
+
+\subsection{Related Work}
+Ensembling generative models has been independently proposed several
+times~\citep{
+wangEnsemblesGenerativeAdversarial2016,
+choiWAICWhyGenerative2019,
+luziEnsemblesGenerativeAdversarial2020,
+chenSelfSupervisedBlindImage2023,
+vanbreugelSyntheticDataReal2023}.
+The inspiration of our work comes from \citet{vanbreugelSyntheticDataReal2023}, who proposed
+ensembling predictive models over multiple synthetic datasets, and empirically studied how
+this improves several aspects of performance in classification.
+
+Generating multiple synthetic datasets has also been proposed with statistical inference in mind,
+for both frequentist~\citep{
+rubin1993statistical,
+raghunathanMultipleImputationStatistical2003,
+raisaNoiseawareStatisticalInference2023},
+and recently Bayesian~\citep{raisaConsistentBayesianInference2023} inference.
+These works use the multiple synthetic datasets to correct statistical inferences
+for the extra uncertainty from synthetic data generation.
+
+The bias-variance decomposition was originally derived by \citet{gemanNeuralNetworksBias1992}
+for standard regressors using MSE as the loss. \citet{jamesVarianceBiasGeneral2003} generalised
+the decomposition to symmetric losses, and
+\citet{pfauGeneralizedBiasvarianceDecomposition2013}
+generalised it to Bregman divergences.
+
+\citet{uedaGeneralizationErrorEnsemble1996} were the first to
+study the MSE bias-variance decomposition with ensembles, and
+\citet{guptaEnsemblesClassifiersBiasVariance2022,woodUnifiedTheoryDiversity2023a}
+later extended the ensemble decomposition to other losses. All of these also apply to generative
+ensembles, but they only provide limited insight for them, as they do not separate the
+synthetic data generation-related terms from the downstream-related terms.
+
+
+\section{Bias-Variance Decompositions for Generative Ensembles}\label{sec:mse-decomposition}
+In this section, we make our main theoretical contributions. We start by defining our
+setting in Section~\ref{sec:problem-setting}. Then we derive a bias-variance decomposition for
+generative ensembles in Section~\ref{sec:mse-decomposition-ge} and derive a simple
+rule of thumb that can be used to select the number of synthetic datasets in
+Section~\ref{sec:estimating-effect-multiple-syn-datasets}. The generative ensemble
+decomposition does not apply to several differentially private synthetic data
+generation methods that generate all synthetic datasets conditional on a single
+privatised value, so we generalise the decomposition to apply to those in
+Section~\ref{sec:mse-decomposition-dp}. We also present a third decomposition that
+applies to Bregman divergences in Appendix~\ref{app:bregman-divergence-decomposition}.
+
+\subsection{Problem Setting}\label{sec:problem-setting}
+We consider multiple synthetic datasets $D_s^{1:m}$, each of which is used to train
+an instance of a predictive model $g$. These models are combined into an ensemble $\hat{g}$
+by averaging, so
+\begin{equation}
+ \hat{g}(x; D_s^{1:m}) = \frac{1}{m}\sum_{i=1}^m g(x; D_s^i).
+\end{equation} +We allow $g$ to be random, so $g$ can for example internally select among several predictive +models to use randomly. + +We assume the synthetic datasets are generated by first i.i.d.\ sampling parameters $\theta_{1:m} | D_r$ given the +real data, and then sampling synthetic datasets $D_s^i \sim p(D_s | \theta_i)$ given the parameters. +This is how synthetic datasets are sampled in DGE~\citep{vanbreugelSyntheticDataReal2023}, where $\theta_i$ +are the parameters of the generative neural network from independent training runs. This also encompasses bootstrapping, where +$\theta_i$ would be the real dataset\footnote{The $\theta_i$ random variables are i.i.d.\ if they +are deterministically equal.}, and $p(D_s | \theta_i)$ is the bootstrapping. + +We will also consider a more general setting that applies to some differentially private synthetic +data generators that do not fit into this setting in Section~\ref{sec:mse-decomposition-dp}. + +\subsection{Mean-squared Error Decomposition}\label{sec:mse-decomposition-ge} + +\begin{restatable}{theorem}{theoremmsesyntheticdatadecomposition}\label{thm:mse-synthetic-data-decomposition} + Let generator parameters $\theta_{1:m}$ be i.i.d.\ given the real data $D_r$, let + the synthetic datasets be $D_s^{i} \sim p(D_s | \theta_i)$ independently, and + let $\hat{g}(x; D_s^{1:m}) = \frac{1}{m}\sum_{i=1}^m g(x; D_s^i)$. Then + the mean-squared error in predicting $y$ from $x$ decomposes + into six terms: model variance (MV), synthetic data variance (SDV), real data variance (RDV), + synthetic data bias (SDB), model bias (MB), and noise $\Var_{y}[y]$: + \begin{equation} + \mathrm{MSE} = \frac{1}{m}\mathrm{MV} + \frac{1}{m}\mathrm{SDV} + + \mathrm{RDV}+ (\mathrm{SDB} + \mathrm{MB})^2 + \Var_y[y], + \label{eq:mse-decomposition-mean-model} + \end{equation} + where + \begin{align} + \mathrm{MSE} &= \E_{y, D_r, D_s^{1:m}}[(y - \hat{g}(x; D_s^{1:m}))^2] \\ + \mathrm{MV} &= \E_{D_r, \theta} \Var_{D_s|\theta}[g(x; D_s)] \\ + \mathrm{SDV} &= \E_{D_r}\Var_{\theta | D_r}\E_{D_s | \theta}[g(x; D_s)] \\ + \mathrm{RDV} &= \Var_{D_r}\E_{D_s|D_r}[g(x; D_s)] \\ + \mathrm{SDB} &= \E_{D_r}[f(x) - \E_{\theta | D_r}[f_\theta(x)]] \\ + \mathrm{MB} &= \E_{D_r, \theta}[f_\theta(x) - \E_{D_s|\theta}[g(x; D_s)]]. + \end{align} + $f(x) = \E_{y}[y]$ is the optimal predictor for real data, + $\theta$ is a single sample from the distribution of + $\theta_i$, $D_s\sim p(D_s | \theta)$ is + a single sample of the synthetic data generating process, and + $f_\theta$ is the optimal predictor for the synthetic data generating process + given parameters $\theta$. All random quantities are implicitly conditioned on $x$. +\end{restatable} +The proofs of all theorems are in Appendix~\ref{sec:missing-proofs}. + +We can immediately make actionable observations: +\begin{enumerate} + \item Increasing the number of synthetic datasets $m$ reduces the + impact of MV and SDV. This means that high-variance models, like + interpolating decision trees or 1-nearest neighbours, benefit the + most from multiple synthetic datasets. These models should be + favoured over low variance high bias models, even if the latter + were better on the real data. + \item There is a simple rule-of-thumb on how many synthetic datasets + are beneficial: $m$ synthetic datasets give a $1 - \frac{1}{m}$ fraction + of the possible benefit from multiple synthetic datasets. For example, + $m = 2$ gives 50\% of the benefit, $m = 10$ gives 90\% and $m = 100$ gives + 99\%. 
This rule-of-thumb can be used to predict the MSE
+ with many synthetic datasets from the results on just two synthetic datasets.
+ More details are given in Section~\ref{sec:estimating-effect-multiple-syn-datasets}.
+\end{enumerate}
+
+We can also consider two extreme scenarios:
+\begin{enumerate}
+ \item The generator is fitted perfectly, and generates from the real data generating distribution.
+ Now
+ \begin{align}
+ \mathrm{MV} &= \Var_{D_r}[g(x; D_r)] \\
+ \mathrm{SDV} &= 0 \\
+ \mathrm{RDV} &= 0 \\
+ \mathrm{SDB} &= 0 \\
+ \mathrm{MB} &= \E_{D_r}[f(x) - g(x; D_r)]
+ \end{align}
+ With one synthetic dataset, the result is the standard bias-variance trade-off. With multiple
+ synthetic datasets, the impact of MV can be reduced, reducing the error compared to just using
+ real data.
+ \item The generator returns the real data: $D_s | D_r = D_r$ deterministically\footnote{
+ Theorem~\ref{thm:mse-synthetic-data-decomposition} applies
+ in this scenario even with multiple synthetic datasets, as the deterministically identical
+ synthetic datasets are independent as random variables.
+ }.
+ \begin{align}
+ \mathrm{MV} &= 0 \\
+ \mathrm{SDV} &= 0 \\
+ \mathrm{RDV} &= \Var_{D_r}[g(x; D_r)] \\
+ \mathrm{SDB} &= 0 \\
+ \mathrm{MB} &= \E_{D_r}[f(x) - g(x; D_r)]
+ \end{align}
+ The result is the standard bias-variance trade-off. The number of synthetic datasets does not matter,
+ as all of them would be the same anyway.
+\end{enumerate}
+While both of these scenarios are unrealistic, they may be approximated by a well-performing algorithm.
+A generator that is a good approximation to either scenario could be considered to generate
+high-quality synthetic data. Multiple synthetic datasets are only beneficial
+in the first scenario, which shows that the synthetic data generation algorithm should have
+similar randomness to the real data generating process, and not just return a dataset that is
+close to the real one with little randomness.
+
+\subsection{Estimating the Effect of Multiple Synthetic Datasets}\label{sec:estimating-effect-multiple-syn-datasets}
+Next, we consider estimating the variance terms MV and SDV in
+Theorem~\ref{thm:mse-synthetic-data-decomposition} from a small number of synthetic
+datasets and a test set. These estimates can then be used to assess if more synthetic
+datasets should be generated, and how many more are useful.
+
+We can simplify Theorem~\ref{thm:mse-synthetic-data-decomposition} to
+\begin{equation}
+ \mathrm{MSE} = \frac{1}{m}\mathrm{MV} + \frac{1}{m}\mathrm{SDV} + \mathrm{Others},
+ \label{eq:simple-mse-decomposition}
+\end{equation}
+where Others does not depend on the number of synthetic datasets $m$.
+The usefulness of more synthetic datasets clearly depends on the magnitude of
+$\mathrm{MV} + \mathrm{SDV}$ compared to Others.
+
+Since MSE depends on $m$, we can add a subscript to denote the $m$ in question:
+$\mathrm{MSE}_m$. Now \eqref{eq:simple-mse-decomposition} gives
+\begin{equation}
+ \mathrm{MSE}_{m}
+ = \mathrm{MSE}_{1} - \left(1 - \frac{1}{m}\right)(\mathrm{MV} + \mathrm{SDV}).
+ \label{eq:mse-m-estimator}
+\end{equation}
+If we have two synthetic datasets, we can estimate $\mathrm{MV} + \mathrm{SDV}$:
+\begin{equation}
+ \begin{split}
+ \mathrm{MV} + \mathrm{SDV}
+ = 2(\mathrm{MSE}_{1} - \mathrm{MSE}_{2}).
\end{split}
+\end{equation}
+
+If we have more than two synthetic datasets, we can set $x_m = 1 - \frac{1}{m}$ and
+$y_m = \mathrm{MSE}_m$ in \eqref{eq:mse-m-estimator}:
+\begin{equation}
+ y_m = y_1 + x_m(\mathrm{MV} + \mathrm{SDV}),
+ \label{eq:mv+sdv-estimator-linear-regression}
+\end{equation}
+so we can estimate $\mathrm{MV} + \mathrm{SDV}$ from linear regression on $(x_m, y_m)$.
+However, this will likely have a limited effect on the accuracy of the $\mathrm{MSE}_m$
+estimates, as it will not
+reduce the noise in estimating $\mathrm{MSE}_{1}$, which has a significant effect in
+\eqref{eq:mse-m-estimator}.
+
+All terms in \eqref{eq:simple-mse-decomposition}-\eqref{eq:mv+sdv-estimator-linear-regression}
+depend on the target features $x$. We would like our estimates to be useful
+for typical $x$, so we will actually want to estimate
+$\E_x(\mathrm{MSE}_m)$. Equations
+\eqref{eq:simple-mse-decomposition}-\eqref{eq:mv+sdv-estimator-linear-regression}
+remain valid if we take the expectation over $x$, so we can simply replace
+the MSE terms with their estimates that are computed from a test set.
+
+Computing the estimates in practice will require that the privacy risk of publishing the
+test MSE is considered acceptable. The MSE for the estimate can also be computed
+from a separate validation set to avoid overfitting to the test set, but the risk
+of overfitting is small in this case, as $m$ has a monotonic effect on the MSE.
+Both of these caveats can be avoided by choosing $m$ using the rule of thumb that
+$m$ synthetic datasets give a $1 - \frac{1}{m}$ fraction of the potential benefit of multiple
+synthetic datasets, which is a consequence of \eqref{eq:mse-m-estimator}.
+
+Note that this MSE estimator can be applied to bagging ensembles~\citep{breimanBaggingPredictors1996}
+like random forests~\citep{breimanRandomForests2001}, since bootstrapping is a very simple form
+of synthetic data generation.
+
+
+\subsection{Differentially Private Synthetic Data Generators}\label{sec:mse-decomposition-dp}
+Generating and releasing multiple synthetic datasets could increase the associated
+disclosure risk. One solution to this is \emph{differential privacy}
+(DP)~\citep{dworkCalibratingNoiseSensitivity2006,dworkAlgorithmicFoundationsDifferential2014},
+which is a property of an algorithm that formally bounds the privacy leakage that can
+result from releasing the output of that algorithm. DP gives a quantitative upper
+bound on the privacy leakage, which is known as the privacy budget.
+Achieving DP requires adding extra noise at some point in the algorithm, lowering the
+utility of the result.
+
+If the synthetic data is to be generated with DP, there are two possible ways
+to handle the required noise addition. The first is to split the privacy budget across the $m$
+synthetic datasets and run the DP generation algorithm separately $m$ times.
+Theorem~\ref{thm:mse-synthetic-data-decomposition} applies in this setting. However, it is not
+clear if multiple synthetic datasets are beneficial in this case, as splitting the privacy
+budget requires adding more noise to each synthetic dataset: for example, under basic
+composition, a total privacy budget of $\epsilon$ leaves only $\epsilon / m$ for each
+synthetic dataset. This also means that the rule of
+thumb from Section~\ref{sec:estimating-effect-multiple-syn-datasets} will not apply.
+Most DP synthetic data generation algorithm would fall into this +category~\citep{ +aydoreDifferentiallyPrivateQuery2021, +chenGSWGANGradientsanitizedApproach2020, +harderDPMERFDifferentiallyPrivate2021, +hardtSimplePracticalAlgorithm2012, +liuIterativeMethodsPrivate2021, +mckennaGraphicalmodelBasedEstimation2019, +mckennaWinningNISTContest2021} if used to generate multiple synthetic datasets. + +The second possibility is generating all synthetic datasets based on a single application of a +DP mechanism. Specifically, a noisy summary $\sdp$ of the real data is released under DP. +The parameters $\theta_{1:m}$ are then sampled i.i.d.\ conditional on $\sdp$, and the +synthetic datasets are sampled conditionally on the $\theta_{1:m}$. This setting includes +algorithms that release a posterior distribution under DP, and use the posterior to +generate synthetic data, like the NAPSU-MQ algorithm~\citep{raisaNoiseawareStatisticalInference2023} and +DP variational inference +(DPVI)~\citep{jalkoDifferentiallyPrivateVariational2017,jalkoPrivacypreservingDataSharing2021}.\footnote{ +In DPVI, $\sdp$ would be the variational approximation to the posterior. +} + +The synthetic datasets are not i.i.d.\ given the real data in the second setting, so the setting +described in Section~\ref{sec:problem-setting} and assumed in +Theorem~\ref{thm:mse-synthetic-data-decomposition} does not apply. However, the synthetic +datasets are i.i.d.\ given the noisy summary $\sdp$, so we obtain a similar decomposition as before. +\begin{restatable}{theorem}{theoremmsedpsyntheticdatadecomposition}\label{thm:mse-dp-synthetic-data-decomposition} + Let generator parameters $\theta_{1:m}$ be i.i.d.\ given a DP summary $\sdp$ of the real + data $D_r$, let the synthetic datasets be $D_s^{i} \sim p(D_s | \theta_i)$ independently, and + let $\hat{g}(x; D_s^{1:m}) = \frac{1}{m}\sum_{i=1}^m g(x; D_s^i)$. Then + \begin{equation} + \begin{split} + \mathrm{MSE} &= \frac{1}{m}\mathrm{MV} + \frac{1}{m}\mathrm{SDV} + + \mathrm{RDV} + \mathrm{DPVAR} + \\ &+\, (\mathrm{SDB} + \mathrm{MB})^2 + \Var_{y}[y], + \end{split} + \label{eq:mse-decomposition-latent-var-mean-model} + \end{equation} + where + \begin{align} + \mathrm{MSE} &= \E_{y, D_r, \sdp, D_s^{1:m}}[(y - \hat{g}(x; D_s^{1:m}))^2] \\ + \mathrm{MV} &= \E_{D_r, \sdp, \theta} \Var_{D_s|\theta}[g(x; D_s)] + \end{align} + \begin{align} + \mathrm{SDV} &= \E_{D_r, \sdp}\Var_{\theta | \sdp}\E_{D_s | \theta}[g(x; D_s)] \\ + \mathrm{RDV} &= \Var_{D_r}\E_{D_s|D_r}[g(x; D_s)] \\ + \mathrm{DPVAR} &= \E_{D_r}\Var_{\sdp|D_r}\E_{D_s|\sdp}[g(x; D_s)] \\ + \mathrm{SDB} &= \E_{D_r, \sdp}[f(x) - \E_{\theta | \sdp}[f_\theta(x)]] \\ + \mathrm{MB} &= \E_{D_r, \sdp, \theta}[f_\theta(x) - \E_{D_s|\theta}[g(x; D_s)]]. + \end{align} + $f(x) = \E_{y}[y]$ is the optimal predictor for real data, + $\theta$ is a single sample from the distribution of + $\theta_i$, $D_s\sim p(D_s | \theta)$ is + a single sample of the synthetic data generating process, and + $f_\theta$ is the optimal predictor for the synthetic data generating process + given parameters $\theta$. All random quantities are implicitly conditioned on $x$. +\end{restatable} + +The takeaways from Theorem~\ref{thm:mse-dp-synthetic-data-decomposition} are mostly the same +as from Theorem~\ref{thm:mse-synthetic-data-decomposition}, and the estimator from +Section~\ref{sec:estimating-effect-multiple-syn-datasets} also applies. 
The main difference is the +DPVAR term in Theorem~\ref{thm:mse-dp-synthetic-data-decomposition}, which accounts for the +added DP noise. As expected, the impact of DPVAR cannot be reduced with additional synthetic +datasets. + + +\section{Experiments}\label{sec:experiments} + +\begin{figure*} + \centering + \includegraphics[width=0.9\textwidth]{figures/synthetic-data-algo-evaluation/generator-comparison.pdf} + \caption{ + Comparison of synthetic data generation algorithms for several prediction algorithms on + the California housing dataset, with 1 to 10 synthetic datasets. + DDPM and synthpop achieve smaller MSE in the downstream predictions, + so they were selected for further experiments. SP-P and SP-IP are the proper and improper + variants of synthpop, and DDPM-KL is DDPM with KL divergence loss. 1-NN and 5-NN are + nearest neighbours with 1 and 5 neighbours. + The dashed black lines show the performance of each + prediction algorithm on the real data, and the solid black line shows the performance of the best + predictor, random forest, on the real data. The results are averaged over 3 repeats, with different train-test splits. + The error bars are 95\% confidence intervals formed by bootstrapping + over the repeats. Linear regression was omitted, as it had nearly identical results as ridge regression. Table~\ref{table:synthetic-data-algo-comparison} in the Appendix contains + the numbers in the plots, including ridge regression. + } + \label{fig:synthetic-data-algo-comparison} +\end{figure*} + +In this section, we describe our experiments. The common theme in all +of them is generating synthetic data and evaluating the performance of several downstream +prediction algorithms trained on the synthetic data. The performance evaluation uses a +test set of real data, which is split from the whole dataset before generating synthetic data. + +The downstream algorithms we consider are nearest neighbours with 1 or 5 neighbours (1-NN and 5-NN), +decision tree (DT), random forest (RF), gradient boosted trees (GB), a multilayer perceptron (MLP) and a +support vector machine (SVM) for both classification and regression. We also +use linear regression (LR) and ridge regression (RR) on regression tasks, and logistic regression (LogR) on +classification tasks, though we omit linear regression from the main text plots, as its results +are nearly identical to ridge regression. Decision trees and 1-NN have a very high variance, +as both interpolate +the training data with the hyperparameters we use. Linear, ridge, and logistic regression +have fairly small variance in contrast. Appendix~\ref{sec:experiment-details} contains more +details on the experimental setup, including details on the datasets, and downstream +algorithm hyperparameters. + +\subsection{Synthetic Data Generation Algorithms}\label{sec:synthetic-data-algorithms-comparison} +First, we compare several synthetic data generation algorithms to see which algorithms are most +interesting for subsequent experiments. We use the California housing dataset, where the downstream +task is regression. The algorithms we compare are +DDPM~\citep{kotelnikovTabDDPMModellingTabular2023}, TVAE~\citep{xuModelingTabularData2019}, +CTGAN~\citep{xuModelingTabularData2019} and synthpop~\citep{nowokSynthpopBespokeCreation2016}. +DDPM, TVAE and CTGAN are a diffusion model, +a variational autoencoder and a GAN that are designed for tabular data. 
We use the implementations from the +synthcity library\footnote{\url{https://github.com/vanderschaarlab/synthcity}} for these. +Synthpop generates synthetic data by sampling one +column from the real data, and generating the other columns by sequentially training a predictive model on the +real data, and predicting the next column from the already generated ones. We use the implementation +from the authors~\citep{nowokSynthpopBespokeCreation2016}. + +We use the default hyperparameters +for all of the algorithms. Synthpop and DDPM have a setting that could potentially affect the randomness in +the synthetic data generation, so we include both possibilities for these settings in this experiment. +For synthpop, this setting is whether the synthetic data is generated from a Bayesian posterior predictive +distribution, which synthpop calls ``proper'' synthetic data. For DDPM, this setting is whether the loss +function is MSE or KL divergence. In the plots, the two variants of synthpop are called ``SP-P'' and ``SP-IP'' for the +proper and improper variants, and the variants of DDPM are ``DDPM'' and ``DDPM-KL''. + +\begin{figure*} + \begin{subfigure}{0.49\textwidth} + \centering + \includegraphics[width=\textwidth]{figures/regression/abalone-by-method.pdf} + \caption{Abalone} + \label{fig:abalone-results} + \end{subfigure} + \begin{subfigure}{0.49\textwidth} + \centering + \includegraphics[width=\textwidth]{figures/regression/ACS2018-by-method.pdf} + \caption{ACS 2018} + \label{fig:acs2018-results} + \end{subfigure} + \begin{subfigure}{0.49\textwidth} + \centering + \includegraphics[width=\textwidth]{figures/regression/california-housing-by-method.pdf} + \caption{California Housing} + \label{fig:california-housing-results} + \end{subfigure} + \begin{subfigure}{0.49\textwidth} + \centering + \includegraphics[width=\textwidth]{figures/regression/insurance-by-method.pdf} + \caption{Insurance} + \label{fig:insurance-results} + \end{subfigure} + \caption{ + MSE of the ensemble of downstream predictors on the regression datasets, with varying + number of synthetic datasets $m$ from either DDPM or synthpop (SP-P). Increasing the number of + synthetic datasets generally decreases MSE, especially for decision trees and 1-NN. + The predictors are nearest neighbours + with 1 or 5 neighbours (1-NN and 5-NN), decision tree (DT), random forest (RF), a multilayer perceptron (MLP), + gradient boosted trees (GB), a support vector machine (SVM) and ridge regression (RR). + The black line is the MSE of the best predictor on real data. + The results are averaged over 3 repeats. The error bars are 95\% confidence intervals formed + by bootstrapping over the repeats. + We omitted linear regression from the plot, as it had almost identical results to + ridge regression. + Tables~\ref{table:abalone-results} to \ref{table:insurance-results} in the Appendix contain + the numbers from the plots, including ridge regression. + } + \label{fig:regression-results} +\end{figure*} + +The results of the comparison are shown in Figure~\ref{fig:synthetic-data-algo-comparison}. We see that synthpop and +DDPM with MSE loss generally outperform the other generation algorithms, so we select them for +the subsequent experiments. There is very little difference between the two variants of synthpop, +so we choose the ``proper'' variant due to its connection with the Bayesian reasoning for using +multiple synthetic datasets. 
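+
+Before moving on, the following minimal sketch illustrates the pipeline shared by all of the
+remaining experiments: each synthetic dataset is used to train one downstream predictor, the
+predictions are averaged, and the ensemble is evaluated on the held-out real test set. The
+sketch is an illustration rather than an excerpt of our code; \texttt{generate\_synthetic} is a
+hypothetical stand-in that refits one of the generators above on the real training data and
+returns one synthetic dataset per call.
+\begin{verbatim}
+import numpy as np
+from sklearn.tree import DecisionTreeRegressor
+from sklearn.metrics import mean_squared_error
+
+def generative_ensemble_mse(generate_synthetic, X_train, y_train,
+                            X_test, y_test, m):
+    # Train one downstream predictor per synthetic dataset.
+    preds = []
+    for _ in range(m):
+        # generate_synthetic refits a generator on the real training
+        # data and returns one synthetic dataset per call.
+        X_syn, y_syn = generate_synthetic(X_train, y_train)
+        model = DecisionTreeRegressor()  # interpolating, high variance
+        model.fit(X_syn, y_syn)
+        preds.append(model.predict(X_test))
+    # The generative ensemble averages the m predictions.
+    y_hat = np.mean(np.stack(preds), axis=0)
+    # Evaluation always uses a held-out real test set.
+    return mean_squared_error(y_test, y_hat)
+\end{verbatim}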
+
+\subsection{Main Experiment}\label{sec:main-experiment}
+
+As our main experiment, we evaluate the performance of the synthetic data ensemble on 7 datasets,
+containing 4 regression and 3 classification tasks. See Appendix~\ref{sec:dataset-details} for
+details on the datasets. We use both DDPM and synthpop as synthetic data generators, and
+generate 32 synthetic datasets, of which between 1 and 32 are used to train the ensemble.
+The results are averaged over 3 runs with different train-test splits.
+
+On the regression datasets, our error metric is MSE, which is the subject of
+Theorem~\ref{thm:mse-synthetic-data-decomposition}. The results in
+Figure~\ref{fig:regression-results} show that
+a larger number of synthetic datasets generally decreases MSE. The decrease is especially clear
+with synthpop and downstream algorithms that have a high variance like decision trees and
+1-NN. Low-variance algorithms like ridge regression have very little, if any,
+decrease from multiple synthetic datasets. This is consistent with
+Theorem~\ref{thm:mse-synthetic-data-decomposition}, where the number of synthetic datasets
+only affects the variance-related terms.
+
+On the classification datasets, we consider 4 error metrics. Brier
+score~\citep{brier1950verification} is the MSE of the class probability
+predictions, so Theorem~\ref{thm:mse-synthetic-data-decomposition} applies to it. Cross entropy is
+a Bregman divergence, so Theorem~\ref{thm:bregman-synthetic-data-decomposition} from
+Appendix~\ref{app:bregman-divergence-decomposition} applies to it.
+We also include accuracy and area under the ROC curve (AUC) even though our theory does not
+apply to them, as they are common and interpretable error metrics, so it is interesting
+to see how multiple synthetic datasets affect them. We use their complements in the plots, so
+that lower is better for all plotted metrics. We only present the Brier score results
+in the main text in Figure~\ref{fig:classification-results}, and defer the rest
+to Figures~\ref{fig:adult-results}, \ref{fig:breast-cancer-results} and
+\ref{fig:german-credit-results} in Appendix~\ref{sec:extra-results}.
+
+\begin{figure*}
+ \begin{subfigure}{\textwidth}
+ \centering
+ \includegraphics[width=\textwidth]{figures/classification/adult-brier-by-method.pdf}
+ \caption{Adult}
+ \label{fig:adult-brier-results}
+ \end{subfigure}
+ \begin{subfigure}{\textwidth}
+ \centering
+ \includegraphics[width=\textwidth]{figures/classification/breast-cancer-brier-by-method.pdf}
+ \caption{Breast Cancer}
+ \label{fig:breast-cancer-brier-results}
+ \end{subfigure}
+ \begin{subfigure}{\textwidth}
+ \centering
+ \includegraphics[width=\textwidth]{figures/classification/german-credit-brier-by-method.pdf}
+ \caption{German Credit}
+ \label{fig:german-credit-brier-results}
+ \end{subfigure}
+ \caption{
+ Brier score of the ensemble of downstream predictors with varying numbers
+ of synthetic datasets $m$, generated by either DDPM or synthpop, and averaging either probabilities or
+ log probabilities, on the classification datasets. Increasing the number of synthetic
+ datasets generally decreases the score, except in cases where one synthetic dataset is
+ already enough to meet the real data performance. Probability averaging generally
+ outperforms log probability averaging.
+ The predictors are nearest neighbours
+ with 1 or 5 neighbours (1-NN and 5-NN), decision tree (DT), random forest (RF), a multilayer perceptron (MLP),
+ gradient boosted trees (GB), a support vector machine (SVM) and logistic regression (LogR).
+ The black line shows the loss of the best
+ downstream predictor trained on real data. The results are averaged over 3 repeats with
+ different train-test splits. The error bars are 95\% confidence intervals formed by
+ bootstrapping over the repeats. Tables~\ref{table:adult-brier-results} to
+ \ref{table:german-credit-brier-results} in the Appendix contain the numbers from the plots.
+ }
+ \label{fig:classification-results}
+\end{figure*}
+
+Because Theorem~\ref{thm:bregman-synthetic-data-decomposition} only applies to cross entropy when
+averaging log probabilities instead of probabilities, we compare both ways of averaging.
+
+The results on the classification datasets in Figure~\ref{fig:classification-results}
+are similar to the regression experiment. A larger
+number of synthetic datasets generally decreases the score, especially for the
+high-variance models. Logistic regression is an exception, as it has low variance; so are
+algorithms that already match the real data performance with one synthetic dataset.
+
+Probability averaging generally outperforms log probability averaging on Brier score and
+cross entropy. There is very little difference between the two on accuracy and AUC, which suggests
+that probability averaging is generally better than log-probability averaging.
+
+In Appendix~\ref{app:variance-estimation-experiment}, we estimate the MV and SDV terms of
+the decomposition in Theorem~\ref{thm:mse-synthetic-data-decomposition}. These confirm
+our claims on the model variances: decision trees and 1-NN have a high variance, while
+linear, ridge and logistic regression have a low variance.
+
+\subsection{Predicting MSE from Two Synthetic Datasets}\label{sec:mse-prediction-experiment}
+Next, we evaluate the predictions made by our rule of thumb from
+Section~\ref{sec:estimating-effect-multiple-syn-datasets}. To recap, our rule of
+thumb predicts that the maximal benefit from multiple synthetic datasets is
+$2(\mathrm{MSE}_1 - \mathrm{MSE}_2)$, and $m$ synthetic datasets achieve
+a $1 - \frac{1}{m}$ fraction of this benefit.
+
+To evaluate the predictions from the rule, we estimate the MSE on regression tasks
+and Brier score on classification tasks for one and two synthetic datasets from the
+test set. The setup is otherwise identical to Section~\ref{sec:main-experiment},
+and the train-test splits are the same.
+We plot the predictions from the rule, and compare them with
+the measured test errors with more than two synthetic datasets.
+
+Figure~\ref{fig:mse-prediction-acs-2018} contains the results for the ACS 2018 dataset,
+and Figures~\ref{fig:mse-prediction-regression1} to \ref{fig:mse-prediction-classification2}
+in the Appendix contain the results for the other datasets. The rule of thumb is very accurate
+on ACS 2018 and reasonably accurate on the other datasets. The variance of the prediction
+depends heavily on the variance of the errors computed from the test data.
+
+We also evaluated the rule on random forests without synthetic data, as it also applies to
+them. In this setting, the number of trees in the random forest is analogous to the
+number of synthetic datasets. We use the same datasets as in the previous experiments and
+use the same train-test splits of the real data.
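+
+In both cases, the predicted error curve is a one-line computation from the errors measured
+with one and two synthetic datasets (or trees), following \eqref{eq:mse-m-estimator}. The
+following minimal sketch shows this computation; the function and its naming are our own
+illustration rather than an excerpt of our code.
+\begin{verbatim}
+def predict_error(err_1, err_2, m):
+    # Rule of thumb: two synthetic datasets identify the reducible part
+    # of the error, MV + SDV = 2 * (err_1 - err_2), and m synthetic
+    # datasets remove a (1 - 1/m) fraction of it.
+    mv_plus_sdv = 2.0 * (err_1 - err_2)
+    return err_1 - (1.0 - 1.0 / m) * mv_plus_sdv
+
+# Example: err_1 = 0.50 and err_2 = 0.45 give
+# predict_error(0.50, 0.45, 10), which is about 0.41.
+\end{verbatim}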
The results are in +Figure~\ref{fig:random-forest-mse-prediction} in the Appendix. The prediction is accurate +when the test error is accurate, but can have high variance. + +\begin{figure*} + \centering + \includegraphics[width=\textwidth]{figures/regression/ACS2018-mse-est-small.pdf} + \caption{MSE prediction on the ACS 2018 dataset. The prediction is very accurate on this + dataset. The solid lines for DDPM and synthpop (SP-P) show the same MSEs as Figure~\ref{fig:regression-results}, + while the dashed lines show predicted MSEs. + 1-NN and 5-NN are nearest neighbours with 1 or 5 neighbours. + We omitted downstream algorithms with uninteresting flat curves. See + Figure~\ref{fig:mse-prediction-regression1} in the appendix for the full + plot, and Figure~\ref{fig:mse-prediction-regression2} plots for the other datasets. + Table~\ref{table:mse-prediction-acs-2018} in the Appendix contains the + numbers from the plots. The results are averaged over 3 + repeats with different train-test splits. The error bands are 95\% confidence + intervals formed by bootstrapping over the repeats. + } + \label{fig:mse-prediction-acs-2018} +\end{figure*} + +\section{Discussion}\label{sec:discussion} + +\paragraph{Limitations} +Our theory assumes that the synthetic datasets are generated i.i.d.\ given either +the real data or a DP summary. This means our theory will not apply to more complicated +schemes for generating multiple synthetic datasets, like excluding generators +based on some criteria~\citep{dikiciConstrainedGenerativeAdversarial2020} or +collecting the generators from different points of a single training +run~\citep{wangEnsemblesGenerativeAdversarial2016}. However, i.i.d.\ +synthetic datasets are still very relevant, as they are the simplest case, +and uncertainty estimation techniques for statistical inference with synthetic +data~\citep{raghunathanMultipleImputationStatistical2003,raisaConsistentBayesianInference2023} +require i.i.d.\ synthetic datasets. + +\paragraph{Conclusion} +We derived bias-variance decompositions for using synthetic data in several cases: +for MSE or Brier score with i.i.d.\ synthetic datasets given the real data +and MSE with i.i.d.\ synthetic datasets given a DP summary of the real data. +The decompositions make actionable predictions, such as yielding a simple rule of +thumb that can be used to select the number of synthetic datasets. +We empirically examined the performance +of generative ensembles on several real datasets and downstream predictors, +and found that the predictions of the theory generally hold in practice. +These findings significantly increase the theoretical understanding of +generative ensembles, which is very limited in prior literature. + +\section*{Acknowledgements} +This work was supported by the Research Council of Finland +(Flagship programme: Finnish Center for Artificial Intelligence, +FCAI as well as Grants 356499 and 359111), the Strategic Research Council +at the Research Council of Finland (Grant 358247) +as well as the European Union (Project +101070617). Views and opinions expressed are however +those of the author(s) only and do not necessarily reflect +those of the European Union or the European Commission. Neither the European +Union nor the granting authority can be held responsible for them. +The authors wish to thank the Finnish Computing Competence +Infrastructure (FCCI) for supporting this project with +computational and data storage resources. 
+ +\section*{Broader Impacts} +Generating multiple synthetic datasets can potentially increase disclosure +risk if appropriate precautions, such as differential privacy, are not used. +Because of this, care should be taken and precautions like differential privacy +should be used if possible when releasing multiple synthetic datasets +of sensitive data, despite our theory suggesting that multiple synthetic +datasets are beneficial for utility. + +\bibliography{Multiple_Synthetic_Datasets_ML} +\bibliographystyle{plainnat} + + +\newpage +\appendix +\onecolumn + +\section{Missing Proofs}\label{sec:missing-proofs} + +\theoremmsesyntheticdatadecomposition* +\begin{proof} + With $m$ synthetic datasets $D_s^{1:m}$ and model $\hat{g}(x, D_s^{1:m})$ that combines the synthetic + datasets, the classical bias-variance decomposition gives + \begin{align} + \E_{y, D_r, D_s^{1:m}}[(y - \hat{g}(x; D_s^{1:m}))^2] + = (f(x) - \E_{D_r, D_s^{1:m}}[\hat{g}(x; D_s^{1:m})])^2 + + \Var_{D_r, D_s^{1:m}}[\hat{g}(x; D_s^{1:m})] + \Var_{y}[y]. + \end{align} + + Using the independence of the synthetic datasets, these can be decomposed further: + \begin{equation} + \E_{D_r, D_s^{1:m}}[\hat{g}(x; D_s^{1:m})] + = \frac{1}{m}\E_{D_r}\E_{D_s^{1:m}|D_r}\left[\sum_{i=1}^m g(x, D_s^i)\right] + = \E_{D_r}\E_{D_s|D_r}[g(x, D_s)] + = \E_{D_r, D_s}[g(x, D_s)], + \end{equation} + + \begin{equation} + \begin{split} + \Var_{D_r, D_s^{1:m}}[\hat{g}(x; D_s^{1:m})] + &= \E_{D_r}\Var_{D_s^{1:m}|D_r}\left[\frac{1}{m} \sum_{i=1}^m g(x; D_s^i)\right] + + \Var_{D_r}\E_{D_s^{1:m}|D_r}\left[\frac{1}{m} \sum_{i=1}^m g(x; D_s^i)\right] + \\&= \frac{1}{m^2} \E_{D_r}\Var_{D_s^{1:m}|D_r}\left[\sum_{i=1}^m g(x; D_s^i)\right] + + \Var_{D_r}\E_{D_s|D_r}\left[g(x; D_s)\right] + \\&= \frac{1}{m} \E_{D_r}\Var_{D_s|D_r}\left[g(x; D_s)\right] + + \Var_{D_r}\E_{D_s|D_r}\left[g(x; D_s)\right], + \label{eq:variance-decomposition-mean-model} + \end{split} + \end{equation} + and + \begin{equation} + \Var_{D_s|D_r}[g(x; D_s)] = \E_{\theta|D_r}\Var_{D_s|\theta}[g(x; D_s)] + \Var_{\theta|D_r}\E_{D_s|\theta}[g(x, D_s)]. + \end{equation} + + The bias can be decomposed with $f_\theta(x)$: + \begin{equation} + \begin{split} + f(x) - \E_{D_r, D_s}[g(x; D_s)] + &= \E_{D_r, \theta}[f(x) - f_\theta(x) + f_\theta(x) - \E_{D_s|\theta}[g(x; D_s)]] + \\&= \E_{D_r}[f(x) - \E_{\theta|D_r}[f_\theta(x)]] + + \E_{D_r, \theta}[f_\theta(x) - \E_{D_s|\theta}[g(x; D_s)]] + \end{split} + \end{equation} + + Combining all of these gives the claim. +\end{proof} + +\theoremmsedpsyntheticdatadecomposition* +\begin{proof} + Using $\sdp$ in place of the real data in Theorem~\ref{thm:mse-synthetic-data-decomposition} + gives + \begin{equation} + \mathrm{MSE} = \frac{1}{m}\mathrm{MV} + \frac{1}{m}\mathrm{SDV} + + \Var_{D_r, \sdp}\E_{D_s|\sdp}[g(x; D_s)] + + (\mathrm{SDB} + \mathrm{MB})^2 + \Var_{y}[y], + \end{equation} + where + \begin{align} + \mathrm{MSE} &= \E_{y, D_r, \sdp, D_s^{1:m}}[(y - \hat{g}(x; D_s^{1:m}))^2] \\ + \mathrm{MV} &= \E_{D_r, \sdp, \theta} \Var_{D_s|\theta}[g(x; D_s)] \\ + \mathrm{SDV} &= \E_{D_r, \sdp}\Var_{\theta | \sdp}\E_{D_s | \theta}[g(x; D_s)] \\ + \mathrm{SDB} &= \E_{D_r, \sdp}[f(x) - \E_{\theta | \sdp}[f_\theta(x)]] \\ + \mathrm{MB} &= \E_{D_r, \sdp, \theta}[f_\theta(x) - \E_{D_s|\theta}[g(x; D_s)]]. 
+ \end{align} + + We can additionally decompose + \begin{equation} + \begin{split} + \Var_{D_r, \sdp}\E_{D_s|\sdp}[g(x; D_s)] + &= \E_{D_r}\Var_{\sdp|D_r}\E_{D_s|\sdp}[g(x; D_s)] + + \Var_{D_r}\E_{\sdp|D_r}\E_{D_s|\sdp}[g(x; D_s)] + \\&= \E_{D_r}\Var_{\sdp|D_r}\E_{D_s|\sdp}[g(x; D_s)] + + \Var_{D_r}\E_{D_s|D_r}[g(x; D_s)] + \end{split} + \end{equation} + This reveals the DP-related variance term + \begin{equation} + \mathrm{DPVAR} = \E_{D_r}\Var_{\sdp|D_r}\E_{D_s|\sdp}[g(x; D_s)] + \end{equation} + so we have + \begin{equation} + \mathrm{MSE} = \frac{1}{m}\mathrm{MV} + \frac{1}{m}\mathrm{SDV} + + \mathrm{RDV} + \mathrm{DPVAR} + (\mathrm{SDB} + \mathrm{MB})^2 + \Var_y[y]. + \end{equation} +\end{proof} + + +\section{Bias-Variance Decomposition for Bregman Divergences}\label{app:bregman-divergence-decomposition} + +\subsection{Background: Bregman Divergences} +A Bregman divergence~\citep{bregmanRelaxationMethodFinding1967} +$D_F\colon \R^d\times \R^d \to \R$ is a loss function +\begin{equation} + D_F(y, g) = F(y) - F(g) - \nabla F(g)^T (y - g) +\end{equation} +where $F\colon \R^d \to \R$ is a strictly convex differentiable function. Many common error metrics, +like MSE and cross entropy, can be expressed as expected values of a Bregman divergence. +In fact, proper scoring rules\footnote{ + Proper scoring rules are error metrics that are minimised by predicting the correct + probabilities. +} +can be characterised via Bregman +divergences~\citep{gneitingStrictlyProperScoring2007,kimparaProperLossesDiscrete2023}. +Table~\ref{tab:bregman-divergences} shows how the metrics we consider are +expressed as Bregman divergences~\citep{guptaEnsemblesClassifiersBiasVariance2022}. + +\citet{pfauGeneralizedBiasvarianceDecomposition2013} derive the following bias-variance decomposition for +Bregman divergences: +\begin{equation} + \underbrace{\E[D(y, g)]}_{\mathrm{Error}} = \underbrace{\E[D(y, \E y)]}_{\mathrm{Noise}} + + \underbrace{D(\E y, \CP g)}_{\mathrm{Bias}} + + \underbrace{\E D(\CP g, g)}_{\mathrm{Variance}} + \label{eq:bregman-ensemble-decomposition} +\end{equation} +$y$ is the true value, and $g$ is the predicted value. All of the random quantities are conditioned on +$x$. +$\CP$ is a \emph{central prediction}: +\begin{equation} + \CP g = \argmin_{z} \E D(z, g). +\end{equation} +The variance term can be used to define a generalisation of variance: +\begin{equation} + \VP g = \E D(\CP g, g) +\end{equation} +$\CP$ and $\VP$ can also be defined conditionally on some random variable $Z$ +by making the expectations conditional on $Z$ in the definitions. +These obey generalised laws of total expectation and +variance~\citep{guptaEnsemblesClassifiersBiasVariance2022}: +\begin{equation} + \CP g = \CP_Z[\CP_{g|Z}[g]] +\end{equation} +and +\begin{equation} + \VP g = \E_Z[\VP_{g|Z}[g]] + \VP_Z[\CP_{g|Z}[g]]. +\end{equation} + +The convex dual of $g$ is $g^* = \nabla F(g)$. +The central prediction $\CP g$ can also be expressed as an expectation +over the convex dual~\citep{guptaEnsemblesClassifiersBiasVariance2022}: +\begin{equation} + \CP g = (\E g^*)^* \end{equation} + +\begin{table*}\caption{Common error metrics as Bregman divergences. $g$ denotes a prediction +in regression and $p$ denotes predicted class probabilities in classification. +$g^{(j)}$ and $p^{(j)}$ denote the predictions of different ensemble members. +$y$ is the correct value in regression, and a one-hot encoding of the correct +class in classification. The binary classification Brier score only looks at +probabilities for one class. 
If the multiclass Brier score is used with two +classes, it is twice the binary Brier score.} +\label{tab:bregman-divergences} +\vskip 0.15in +\begin{center} +\begin{small} +\begin{sc} +\begin{tabular}{lccr} + \toprule + Error Metric & $D_F$ & $F(t)$ & Dual Average \\ + \midrule + MSE & $(y - g)^2$ & $t^2$ & $\frac{1}{m}\sum_{j=1}^m g^{(j)}$ \\ + Brier Score (2 classes) & $(y_1 - p_1)^2$ & $t^2$ & $\frac{1}{m}\sum_{j=1}^m p^{(j)}$\\ + Brier Score (Multiclass) & $\sum_{i}(y_i - p_i)^2$ & $\sum_i t_i^2$ & $\frac{1}{m}\sum_{j=1}^m p^{(j)}$\\ + Cross Entropy & $-\sum_{i} y_i \ln p_i$ & $\sum_{i} t_i \ln t_i$ & + $\mathrm{softmax}\left(\frac{1}{m}\sum_{j=1}^m \ln p^{(j)}\right)$\\ + \bottomrule +\end{tabular} +\end{sc} +\end{small} +\end{center} +\vskip -0.1in +\end{table*} + +\citet{guptaEnsemblesClassifiersBiasVariance2022} study the bias-variance +decomposition of Bregman divergence on a generic ensemble. They show that +if the ensemble aggregates prediction by averaging them, bias is not preserved, +and can increase. As a solution, they consider \emph{dual averaging}, +that is +\begin{equation} + \hat{g} = \left(\frac{1}{m}\sum_{i=1}^m g_i^*\right)^* +\end{equation} +for models $g_1, \dotsc, g_m$ forming the ensemble $\hat{g}$. +They show that the bias is preserved in the dual averaged ensemble, and derive a +bias-variance decomposition for them. For mean squared error, the dual average is +simply the standard average, but for cross entropy, it corresponds to averaging +log probabilities. + +\subsection{Bregman Divergence Decomposition for Synthetic Data}\label{sec:bregman-decomposition} + +We extend the Bregman divergence decomposition for ensembles from \citet{guptaEnsemblesClassifiersBiasVariance2022} +to generative ensembles. +To prove Theorem~\ref{thm:bregman-synthetic-data-decomposition}, we +use the following lemma. +\begin{lemma}[\citealt{guptaEnsemblesClassifiersBiasVariance2022}, Proposition 5.3]\label{lemma:iid-dual-ensemble-mean-variance} + Let $X_1, \dotsc, X_m$ be i.i.d.\ random variables and let + $\hat{X} = (\sum_{i=1}^m X_i^*)^*$ be their dual average. + Then $\CP \hat{X} = \CP X$, $\VP \hat{X} \leq \VP X$ and for any independent $Y$, + $D(\E Y, \CP \hat{X}) = D(\E Y, \CP X)$. 
+\end{lemma} + +\begin{restatable}{theorem}{theorembregmansyntheticdatadecomposition}\label{thm:bregman-synthetic-data-decomposition} + When the synthetic datasets $D_s^{1:m}$ are i.i.d.\ given the real data $D_r$ and + $\hat{g}(x; D_s^{1:m}) = (\frac{1}{m}\sum_{i=1}^m g(x; D_s^i)^*)^*$, + \begin{equation} + \mathrm{Error} \leq \mathrm{MV} + \mathrm{SDV} + \mathrm{RDV} + \mathrm{Bias} + \mathrm{Noise} + \end{equation} + where + \begin{align} + \mathrm{Error} &= \E_{y, D_r, D_s^{1:m}}[D(y, \hat{g})] \\ + \mathrm{MV} &= \E_{D_r}\E_{\theta | D_r}\VP_{D_s | \theta}[g] \\ + \mathrm{SDV} &= \E_{D_r}\VP_{\theta | D_r}\CP_{D_s | \theta}[g] \\ + \mathrm{RDV} &= \VP_{D_r}\CP_{D_s|D_r}[g] \\ + \mathrm{Bias} &= D\left(\E_{y} y, \CP_{D_r}\CP_{D_s|D_r} [g]\right)\\ + \mathrm{Noise} &= \E_{y}\left[D(y, \E_y y)\right] + \end{align} +\end{restatable} +\begin{proof} + Plugging the ensemble $\hat{g}$ into the decomposition + \eqref{eq:bregman-ensemble-decomposition} gives + \begin{equation} + \E_{y, D_r, D_s^{1:m}}[D(y, \hat{g})] + = \E_{y}[D(y, \E_{y} y)] + + D(\E_{y} y, \CP_{D_r,D_s^{1:m}} \hat{g}) + + \VP_{D_r,D_s^{1:m}} [\hat{g}] + \end{equation} + + Applying the generalised laws of expectation and variance, and + Lemma~\ref{lemma:iid-dual-ensemble-mean-variance} to the variance term, + we obtain: + \begin{align} + \VP_{D_r, D_s^{1:m}} [\hat{g}] + &= \E_{D_r}\VP_{D_s^{1:m}|D_r}[\hat{g}] + \VP_{D_r}\CP_{D_s^{1:m}|D_r}[\hat{g}] + \end{align} + For the second term on the right: + \begin{equation} + \CP_{D_s^{1:m}|D_r}[\hat{g}] = \CP_{D_s|D_r}[g], + \end{equation} + which gives the RDV: + \begin{equation} + \VP_{D_r}\CP_{D_s^{1:m}|D_r}[\hat{g}] = \VP_{D_r}\CP_{D_s|D_r}[g]. + \end{equation} + For the first term on the right: + \begin{equation} + \E_{D_r}\VP_{D_s^{1:m}|D_r}[\hat{g}] \leq \E_{D_r}\VP_{D_s|D_r}[g] + \end{equation} + and + \begin{equation} + \VP_{D_s|D_r}[g] = \E_{\theta | D_r}\VP_{D_s | \theta}[g] + + \VP_{\theta | D_r}\CP_{D_s | \theta}[g], + \end{equation} + which give MV and SDV. + + For the bias + \begin{equation} + \begin{split} + D(\E_{y} y, \CP_{D_r, D_s^{1:m}} [\hat{g}]) + &= D(\E_{y} y, \CP_{D_r}\CP_{D_s^{1:m}|D_r} [\hat{g}]) + \\&= D(\E_{y} y, \CP_{D_r}\CP_{D_s|D_r} [g]). + \end{split} + \end{equation} + Putting everything together proves the claim. +\end{proof} +This decomposition is not as easy to interpret as the other two in +Section~\ref{sec:mse-decomposition}, as is only gives +an upper bound, and does not explicitly depend on the number of synthetic datasets. + + +\section{Experimental Details}\label{sec:experiment-details} + +\subsection{Datasets}\label{sec:dataset-details} +In our experiments, we use 7 tabular datasets. For four of them, the downstream prediction +task is regression, and for the other three, the prediction task is binary classification. +Table~\ref{tab:dataset-information} lists some general information on the datasets. +We use 25\% of the real data as a test set, with the remaining 75\% being used to generate +the synthetic data, for all of the datasets. All experiments are repeated several times, +with different train-test splits for each repeat. + +\begin{table}\caption{Details on the datasets used in the experiments. \# Cat. and \# Num. are the +numbers of categorical and numerical features, not counting the target variable. 
For datasets
+with removed rows, the table shows the number of rows after the removals.}
+\label{tab:dataset-information}
+\vskip 0.15in
+\begin{center}
+\begin{small}
+\begin{sc}
+\begin{tabular}{lcccr}
+    \toprule
+    Dataset & \# Rows & \# Cat. & \# Num. & Task \\
+    \midrule
+    Abalone & 4177 & 1 & 7 & Regression \\
+    ACS 2018 & 50000 & 5 & 2 & Regression \\
+    Adult & 45222 & 8 & 4 & Classification \\
+    Breast Cancer & 569 & 0 & 30 & Classification \\
+    California Housing & 20622 & 0 & 8 & Regression \\
+    German Credit & 1000 & 13 & 7 & Classification \\
+    Insurance & 1338 & 3 & 3 & Regression \\
+    \bottomrule
+\end{tabular}
+\end{sc}
+\end{small}
+\end{center}
+\vskip -0.1in
+\end{table}
+
+\paragraph{Abalone} \citep{nashAbalone1995} The abalone dataset contains information on abalones,
+with the task of predicting the number of rings on the abalone from the other features.
+
+\paragraph{ACS 2018}
+(\url{https://www.census.gov/programs-surveys/acs/microdata/documentation.2018.html})
+This dataset contains several variables from the American Community Survey (ACS) of 2018,
+with the task of predicting a person's income from the other features. Specifically, the variables we selected are
+AGEP (age), COW (employer type), SCHL (education), MAR (marital status), WKHP (working hours),
+SEX, RAC1P (race), and the target PINCP (income). We take a subset of 50000 datapoints
+from the California data, and log-transform the target variable. We use the
+folktables package~\citep{dingRetiringAdultNew2021} to download this subset of the
+data.
+
+\paragraph{Adult} \citep{kohaviAdult1996}
+The UCI Adult dataset contains general information on people, with the task of predicting
+whether their income is over \$50000. We drop rows with any missing values.
+
+\paragraph{Breast Cancer} \citep{wolbergBreastCancerWisconsin1995}
+The breast cancer dataset contains features derived from images of potential tumors,
+with the task of predicting whether the potential tumor is benign or malignant.
+
+\paragraph{California Housing} (\url{https://scikit-learn.org/stable/datasets/real_world.html#california-housing-dataset})
+The California housing dataset contains information on housing districts, specifically census block groups,
+in California. The task is predicting the median house value in the district. We removed outlier rows where the
+average number of rooms is at least 50, or the average occupancy is at least 30. According to
+the dataset description, these likely correspond to districts with many empty houses. We
+log-transformed the target variable, as well as the population and median income features.
+
+\paragraph{German Credit} \citep{hofmannStatlogGermanCredit1994}
+The German credit dataset contains information on a bank's customers, with the task of predicting
+whether a customer is a ``good'' or ``bad'' credit risk.
+
+\paragraph{Insurance}
+(\url{https://www.kaggle.com/datasets/mirichoi0218/insurance/data})
+The insurance dataset contains general information on people, like age, gender and BMI, as
+well as the amount billed by their medical insurance, which is the variable to predict.
+We take a log transform of the target variable before generating synthetic data.
+
+\subsection{Downstream Prediction Algorithms}\label{sec:downstream-model-details}
+
+We use the scikit-learn\footnote{\url{https://scikit-learn.org/stable/index.html}} implementations
+of all of the downstream algorithms, which provide probability predictions for all algorithms on the
+classification tasks.
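+On the classification tasks, the per-dataset probability predictions are aggregated either by
+direct averaging (the dual average for the Brier score) or by averaging log-probabilities and
+renormalising with a softmax (the dual average for cross entropy), matching the ``Prob.\ Avg.''
+and ``Log Prob.\ Avg.'' rows in the result tables. The following minimal sketch illustrates this
+aggregation with scikit-learn classifiers; the function name, the \texttt{synthetic\_datasets}
+list of $(X, y)$ arrays, and the \texttt{log\_prob\_avg} flag are illustrative and not part of
+our implementation.
+\begin{verbatim}
+import numpy as np
+from scipy.special import softmax
+from sklearn.base import clone
+
+def ensemble_probabilities(base_model, synthetic_datasets, X_test,
+                           log_prob_avg=False):
+    # Train one copy of the downstream model per synthetic dataset and
+    # collect its predicted class probabilities on the test set.
+    probs = []
+    for X_syn, y_syn in synthetic_datasets:
+        model = clone(base_model).fit(X_syn, y_syn)
+        probs.append(model.predict_proba(X_test))
+    probs = np.stack(probs)  # shape: (m, n_test, n_classes)
+    if log_prob_avg:
+        # Dual average for cross entropy: softmax of the mean log-probability.
+        logs = np.log(np.clip(probs, 1e-12, None))
+        return softmax(logs.mean(axis=0), axis=-1)
+    # Dual average for Brier score / MSE: plain probability average.
+    return probs.mean(axis=0)
+\end{verbatim}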
We standardise the data before training for all downstream
+algorithms except the tree-based algorithms, specifically decision tree, random forest, and gradient
+boosted trees. This standardisation is done just before downstream training, so the input to the
+synthetic data generation algorithms is not standardised. We use the default hyperparameters of
+scikit-learn for all downstream algorithms except MLP, where we increased the maximum number of iterations
+to 1000, as the default was not enough for convergence on some datasets. In particular, this means that
+decision trees are trained to interpolate the training data, resulting in high variance of the predictions.
+
+\newpage
+\section{Extra Results}\label{sec:extra-results}
+\begin{figure*}[h]
+    \centering
+    \includegraphics[width=\textwidth]{figures/classification/adult-all-metrics.pdf}
+    \caption{
+        All error metrics on the Adult dataset. Note the logarithmic scale on the
+        cross entropy y-axis. The predictors are nearest neighbours
+        with 1 or 5 neighbours (1-NN and 5-NN), decision tree (DT), random forest (RF), a multilayer perceptron (MLP),
+        gradient boosted trees (GB), a support vector machine (SVM) and logistic regression (LogR).
+        The black line shows the loss of the best
+        downstream predictor trained on real data. The results are averaged over 3 repeats with
+        different train-test splits. The error bars are 95\% confidence intervals formed by
+        bootstrapping over the repeats.
+    }
+    \label{fig:adult-results}
+\end{figure*}
+
+\begin{figure*}[h]
+    \centering
+    \includegraphics[width=\textwidth]{figures/classification/breast-cancer-all-metrics.pdf}
+    \caption{
+        All error metrics on the breast cancer dataset. Note the logarithmic scale on the
+        cross entropy y-axis. The predictors are nearest neighbours
+        with 1 or 5 neighbours (1-NN and 5-NN), decision tree (DT), random forest (RF), a multilayer perceptron (MLP),
+        gradient boosted trees (GB), a support vector machine (SVM) and logistic regression (LogR).
+        The black line shows the loss of the best
+        downstream predictor trained on real data. The results are averaged over 3 repeats with
+        different train-test splits. The error bars are 95\% confidence intervals formed by
+        bootstrapping over the repeats.
+    }
+    \label{fig:breast-cancer-results}
+\end{figure*}
+
+\begin{figure*}[h]
+    \centering
+    \includegraphics[width=\textwidth]{figures/classification/german-credit-all-metrics.pdf}
+    \caption{
+        All error metrics on the German credit dataset. Note the logarithmic scale on the
+        cross entropy y-axis. The predictors are nearest neighbours
+        with 1 or 5 neighbours (1-NN and 5-NN), decision tree (DT), random forest (RF), a multilayer perceptron (MLP),
+        gradient boosted trees (GB), a support vector machine (SVM) and logistic regression (LogR).
+        The black line shows the loss of the best
+        downstream predictor trained on real data. The results are averaged over 3 repeats with
+        different train-test splits. The error bars are 95\% confidence intervals formed by
+        bootstrapping over the repeats.
+    }
+    \label{fig:german-credit-results}
+\end{figure*}
+
+\begin{figure*}
+    \begin{subfigure}{\textwidth}
+        \centering
+        \includegraphics[width=\textwidth]{figures/regression/abalone-mse-est.pdf}
+        \caption{Abalone}
+    \end{subfigure}
+    \begin{subfigure}{\textwidth}
+        \centering
+        \includegraphics[width=\textwidth]{figures/regression/ACS2018-mse-est.pdf}
+        \caption{ACS 2018}
+        \label{fig:mse-prediction-acs}
+    \end{subfigure}
+    \caption{
+        MSE prediction on the first two regression datasets.
The measured MSE
+        line for linear regression with synthpop on ACS 2018 data is cut off due to excluding repeats with extremely
+        large MSE ($\geq 10^6$).
+        1-NN and 5-NN are nearest neighbours with 1 or 5 neighbours. The results are averaged over 3
+        repeats with different train-test splits. The error bands are 95\% confidence
+        intervals formed by bootstrapping over the repeats.
+    }
+    \label{fig:mse-prediction-regression1}
+\end{figure*}
+\begin{figure*}
+    \begin{subfigure}{\textwidth}
+        \centering
+        \includegraphics[width=\textwidth]{figures/regression/california-housing-mse-est.pdf}
+        \caption{California Housing}
+    \end{subfigure}
+    \begin{subfigure}{\textwidth}
+        \centering
+        \includegraphics[width=\textwidth]{figures/regression/insurance-mse-est.pdf}
+        \caption{Insurance}
+    \end{subfigure}
+    \caption{
+        MSE prediction on the last two regression datasets.
+        1-NN and 5-NN are nearest neighbours with 1 or 5 neighbours. The results are averaged over 3
+        repeats with different train-test splits. The error bands are 95\% confidence
+        intervals formed by bootstrapping over the repeats.
+    }
+    \label{fig:mse-prediction-regression2}
+\end{figure*}
+
+\begin{figure*}
+    \begin{subfigure}{\textwidth}
+        \centering
+        \includegraphics[width=\textwidth]{figures/classification/adult-mse-est.pdf}
+        \caption{Adult}
+    \end{subfigure}
+    \begin{subfigure}{\textwidth}
+        \centering
+        \includegraphics[width=\textwidth]{figures/classification/breast-cancer-mse-est.pdf}
+        \caption{Breast Cancer}
+    \end{subfigure}
+    \caption{
+        Brier score prediction on two classification datasets.
+        1-NN and 5-NN are nearest neighbours with 1 or 5 neighbours. The results are averaged over 3
+        repeats with different train-test splits. The error bands are 95\% confidence
+        intervals formed by bootstrapping over the repeats.
+    }
+    \label{fig:mse-prediction-classification1}
+\end{figure*}
+
+\begin{figure*}
+    \centering
+    \includegraphics[width=\textwidth]{figures/classification/german-credit-mse-est.pdf}
+    \caption{
+        Brier score prediction on German credit data.
+        1-NN and 5-NN are nearest neighbours with 1 or 5 neighbours. The results are averaged over 3
+        repeats with different train-test splits. The error bands are 95\% confidence
+        intervals formed by bootstrapping over the repeats.
+    }
+    \label{fig:mse-prediction-classification2}
+\end{figure*}
+
+\begin{figure*}
+    \begin{subfigure}{\textwidth}
+        \centering
+        \includegraphics[width=\textwidth]{figures/random-forest-mse-prediction-regression.pdf}
+        \caption{Regression}
+        \label{fig:random-forest-mse-prediction-regression}
+    \end{subfigure}
+    \begin{subfigure}{\textwidth}
+        \centering
+        \includegraphics[width=0.8\textwidth]{figures/random-forest-mse-prediction-classification.pdf}
+        \caption{Classification}
+        \label{fig:random-forest-mse-prediction-classification}
+    \end{subfigure}
+    \caption{
+        Random forest performance prediction on the regression datasets in (a) and classification
+        datasets in (b). The prediction is reasonably accurate on the datasets with accurate
+        estimates of the error. On the other datasets, the prediction can
+        have high variance. The lines show averages over 3 repeats with different train-test splits.
+        The error bands are 95\% confidence intervals formed by bootstrapping over the repeats.
+    }
+    \label{fig:random-forest-mse-prediction}
+\end{figure*}
+
+\newpage
+
+\begin{table}
+    \caption{
+        Table of synthetic data generator comparison results from
+        Figure~\ref{fig:synthetic-data-algo-comparison}.
+ The numbers are + the mean MSE $\pm$ standard deviation from 3 repeats. + } + \label{table:synthetic-data-algo-comparison} + \small + \centering + \begin{tabular}{llllll} +\toprule + & m & 1 & 2 & 5 & 10 \\ +Downstream & Generator & & & & \\ +\midrule +\multirow[c]{6}{*}{1-NN} & CTGAN & 0.2621 $\pm$ 0.0228 & 0.1775 $\pm$ 0.0037 & 0.1316 $\pm$ 0.0044 & 0.1164 $\pm$ 0.0040 \\ + & DDPM & 0.1365 $\pm$ 0.0029 & 0.1365 $\pm$ 0.0029 & 0.1365 $\pm$ 0.0029 & 0.1343 $\pm$ 0.0019 \\ + & DDPM-KL & 0.2447 $\pm$ 0.0082 & 0.2447 $\pm$ 0.0082 & 0.2238 $\pm$ 0.0240 & 0.2124 $\pm$ 0.0085 \\ + & SP-IP & 0.1544 $\pm$ 0.0040 & 0.1139 $\pm$ 0.0028 & 0.0892 $\pm$ 0.0018 & 0.0805 $\pm$ 0.0019 \\ + & SP-P & 0.1654 $\pm$ 0.0014 & 0.1172 $\pm$ 0.0012 & 0.0893 $\pm$ 0.0027 & 0.0801 $\pm$ 0.0015 \\ + & TVAE & 0.2195 $\pm$ 0.0036 & 0.2002 $\pm$ 0.0316 & 0.2114 $\pm$ 0.0123 & 0.1829 $\pm$ 0.0073 \\ +\cline{1-6} +\multirow[c]{6}{*}{5-NN} & CTGAN & 0.1518 $\pm$ 0.0110 & 0.1230 $\pm$ 0.0047 & 0.1125 $\pm$ 0.0050 & 0.1092 $\pm$ 0.0040 \\ + & DDPM & 0.0885 $\pm$ 0.0023 & 0.0885 $\pm$ 0.0023 & 0.0885 $\pm$ 0.0023 & 0.0881 $\pm$ 0.0020 \\ + & DDPM-KL & 0.1379 $\pm$ 0.0015 & 0.1379 $\pm$ 0.0015 & 0.1342 $\pm$ 0.0023 & 0.1320 $\pm$ 0.0023 \\ + & SP-IP & 0.0936 $\pm$ 0.0030 & 0.0852 $\pm$ 0.0027 & 0.0797 $\pm$ 0.0021 & 0.0780 $\pm$ 0.0021 \\ + & SP-P & 0.1029 $\pm$ 0.0013 & 0.0891 $\pm$ 0.0020 & 0.0814 $\pm$ 0.0015 & 0.0788 $\pm$ 0.0014 \\ + & TVAE & 0.1240 $\pm$ 0.0008 & 0.1201 $\pm$ 0.0073 & 0.1227 $\pm$ 0.0028 & 0.1161 $\pm$ 0.0018 \\ +\cline{1-6} +\multirow[c]{6}{*}{Decision Tree} & CTGAN & 0.2926 $\pm$ 0.0398 & 0.1818 $\pm$ 0.0048 & 0.1301 $\pm$ 0.0034 & 0.1136 $\pm$ 0.0024 \\ + & DDPM & 0.1332 $\pm$ 0.0076 & 0.1285 $\pm$ 0.0068 & 0.1266 $\pm$ 0.0069 & 0.1219 $\pm$ 0.0033 \\ + & DDPM-KL & 0.2395 $\pm$ 0.0041 & 0.2314 $\pm$ 0.0043 & 0.2024 $\pm$ 0.0272 & 0.1885 $\pm$ 0.0186 \\ + & SP-IP & 0.1355 $\pm$ 0.0048 & 0.0981 $\pm$ 0.0010 & 0.0762 $\pm$ 0.0014 & 0.0689 $\pm$ 0.0008 \\ + & SP-P & 0.1432 $\pm$ 0.0058 & 0.1015 $\pm$ 0.0011 & 0.0758 $\pm$ 0.0013 & 0.0669 $\pm$ 0.0010 \\ + & TVAE & 0.2086 $\pm$ 0.0063 & 0.1838 $\pm$ 0.0328 & 0.1894 $\pm$ 0.0149 & 0.1620 $\pm$ 0.0091 \\ +\cline{1-6} +\multirow[c]{6}{*}{Random Forest} & CTGAN & 0.1286 $\pm$ 0.0151 & 0.1050 $\pm$ 0.0032 & 0.1004 $\pm$ 0.0026 & 0.0988 $\pm$ 0.0024 \\ + & DDPM & 0.0681 $\pm$ 0.0020 & 0.0678 $\pm$ 0.0022 & 0.0674 $\pm$ 0.0022 & 0.0673 $\pm$ 0.0021 \\ + & DDPM-KL & 0.1113 $\pm$ 0.0018 & 0.1106 $\pm$ 0.0019 & 0.1089 $\pm$ 0.0008 & 0.1082 $\pm$ 0.0022 \\ + & SP-IP & 0.0676 $\pm$ 0.0012 & 0.0649 $\pm$ 0.0010 & 0.0626 $\pm$ 0.0008 & 0.0619 $\pm$ 0.0007 \\ + & SP-P & 0.0762 $\pm$ 0.0020 & 0.0696 $\pm$ 0.0014 & 0.0642 $\pm$ 0.0020 & 0.0629 $\pm$ 0.0018 \\ + & TVAE & 0.0912 $\pm$ 0.0013 & 0.0888 $\pm$ 0.0024 & 0.0889 $\pm$ 0.0017 & 0.0866 $\pm$ 0.0022 \\ +\cline{1-6} +\multirow[c]{6}{*}{MLP} & CTGAN & 0.1267 $\pm$ 0.0082 & 0.1054 $\pm$ 0.0105 & 0.0991 $\pm$ 0.0075 & 0.0958 $\pm$ 0.0049 \\ + & DDPM & 0.0650 $\pm$ 0.0014 & 0.0642 $\pm$ 0.0016 & 0.0638 $\pm$ 0.0007 & 0.0635 $\pm$ 0.0009 \\ + & DDPM-KL & 0.1032 $\pm$ 0.0020 & 0.1020 $\pm$ 0.0028 & 0.0999 $\pm$ 0.0023 & 0.0985 $\pm$ 0.0036 \\ + & SP-IP & 0.0702 $\pm$ 0.0018 & 0.0682 $\pm$ 0.0014 & 0.0661 $\pm$ 0.0011 & 0.0658 $\pm$ 0.0007 \\ + & SP-P & 0.0747 $\pm$ 0.0029 & 0.0705 $\pm$ 0.0006 & 0.0685 $\pm$ 0.0003 & 0.0681 $\pm$ 0.0006 \\ + & TVAE & 0.0882 $\pm$ 0.0065 & 0.0834 $\pm$ 0.0023 & 0.0818 $\pm$ 0.0019 & 0.0809 $\pm$ 0.0021 \\ +\cline{1-6} +\multirow[c]{6}{*}{Gradient Boosting} & CTGAN & 0.1270 $\pm$ 0.0162 & 
0.1053 $\pm$ 0.0044 & 0.1026 $\pm$ 0.0030 & 0.1012 $\pm$ 0.0023 \\ + & DDPM & 0.0725 $\pm$ 0.0023 & 0.0725 $\pm$ 0.0023 & 0.0725 $\pm$ 0.0023 & 0.0724 $\pm$ 0.0022 \\ + & DDPM-KL & 0.1054 $\pm$ 0.0038 & 0.1054 $\pm$ 0.0038 & 0.1045 $\pm$ 0.0031 & 0.1044 $\pm$ 0.0037 \\ + & SP-IP & 0.0715 $\pm$ 0.0018 & 0.0698 $\pm$ 0.0013 & 0.0689 $\pm$ 0.0012 & 0.0685 $\pm$ 0.0011 \\ + & SP-P & 0.0745 $\pm$ 0.0023 & 0.0724 $\pm$ 0.0015 & 0.0711 $\pm$ 0.0012 & 0.0708 $\pm$ 0.0018 \\ + & TVAE & 0.0912 $\pm$ 0.0026 & 0.0900 $\pm$ 0.0018 & 0.0901 $\pm$ 0.0018 & 0.0893 $\pm$ 0.0018 \\ +\cline{1-6} +\multirow[c]{6}{*}{SVM} & CTGAN & 0.1235 $\pm$ 0.0144 & 0.1051 $\pm$ 0.0017 & 0.1005 $\pm$ 0.0024 & 0.0993 $\pm$ 0.0022 \\ + & DDPM & 0.0700 $\pm$ 0.0027 & 0.0700 $\pm$ 0.0027 & 0.0700 $\pm$ 0.0027 & 0.0700 $\pm$ 0.0027 \\ + & DDPM-KL & 0.1026 $\pm$ 0.0032 & 0.1026 $\pm$ 0.0032 & 0.1017 $\pm$ 0.0024 & 0.1013 $\pm$ 0.0034 \\ + & SP-IP & 0.0736 $\pm$ 0.0028 & 0.0725 $\pm$ 0.0026 & 0.0712 $\pm$ 0.0022 & 0.0711 $\pm$ 0.0020 \\ + & SP-P & 0.0768 $\pm$ 0.0025 & 0.0745 $\pm$ 0.0020 & 0.0728 $\pm$ 0.0018 & 0.0724 $\pm$ 0.0020 \\ + & TVAE & 0.0944 $\pm$ 0.0022 & 0.0934 $\pm$ 0.0004 & 0.0938 $\pm$ 0.0011 & 0.0919 $\pm$ 0.0023 \\ +\cline{1-6} +\multirow[c]{6}{*}{Ridge Regression} & CTGAN & 0.1587 $\pm$ 0.0182 & 0.1391 $\pm$ 0.0032 & 0.1381 $\pm$ 0.0042 & 0.1372 $\pm$ 0.0044 \\ + & DDPM & 0.1110 $\pm$ 0.0036 & 0.1110 $\pm$ 0.0036 & 0.1110 $\pm$ 0.0036 & 0.1109 $\pm$ 0.0035 \\ + & DDPM-KL & 0.1296 $\pm$ 0.0035 & 0.1296 $\pm$ 0.0035 & 0.1295 $\pm$ 0.0035 & 0.1293 $\pm$ 0.0037 \\ + & SP-IP & 0.1098 $\pm$ 0.0032 & 0.1095 $\pm$ 0.0031 & 0.1095 $\pm$ 0.0030 & 0.1096 $\pm$ 0.0030 \\ + & SP-P & 0.1109 $\pm$ 0.0034 & 0.1106 $\pm$ 0.0030 & 0.1105 $\pm$ 0.0030 & 0.1105 $\pm$ 0.0033 \\ + & TVAE & 0.1353 $\pm$ 0.0044 & 0.1343 $\pm$ 0.0053 & 0.1341 $\pm$ 0.0055 & 0.1336 $\pm$ 0.0053 \\ +\cline{1-6} +\multirow[c]{6}{*}{Linear Regression} & CTGAN & 0.1587 $\pm$ 0.0182 & 0.1391 $\pm$ 0.0032 & 0.1381 $\pm$ 0.0042 & 0.1372 $\pm$ 0.0044 \\ + & DDPM & 0.1110 $\pm$ 0.0036 & 0.1110 $\pm$ 0.0036 & 0.1110 $\pm$ 0.0036 & 0.1109 $\pm$ 0.0035 \\ + & DDPM-KL & 0.1296 $\pm$ 0.0035 & 0.1296 $\pm$ 0.0035 & 0.1295 $\pm$ 0.0035 & 0.1293 $\pm$ 0.0037 \\ + & SP-IP & 0.1098 $\pm$ 0.0032 & 0.1095 $\pm$ 0.0031 & 0.1095 $\pm$ 0.0030 & 0.1096 $\pm$ 0.0030 \\ + & SP-P & 0.1109 $\pm$ 0.0034 & 0.1105 $\pm$ 0.0030 & 0.1105 $\pm$ 0.0030 & 0.1105 $\pm$ 0.0033 \\ + & TVAE & 0.1352 $\pm$ 0.0044 & 0.1343 $\pm$ 0.0053 & 0.1341 $\pm$ 0.0055 & 0.1336 $\pm$ 0.0053 \\ +\cline{1-6} +\bottomrule +\end{tabular} + \end{table} + +\begin{table} + \caption{ + Table of results from the Abalone dataset. The numbers are + the mean MSE $\pm$ standard deviation from 3 repeats. 
+ } + \label{table:abalone-results} + \small + \centering + \begin{tabular}{llllllll} +\toprule + & m & 1 & 2 & 4 & 8 & 16 & 32 \\ +Downstream & Generator & & & & & & \\ +\midrule +\multirow[c]{2}{*}{Linear Regression} & DDPM & 8.24 $\pm$ 2.392 & 8.24 $\pm$ 2.392 & 8.31 $\pm$ 2.334 & 8.25 $\pm$ 2.388 & 7.79 $\pm$ 1.602 & 7.79 $\pm$ 1.602 \\ + & SP-P & 5.57 $\pm$ 0.646 & 5.42 $\pm$ 0.429 & 5.35 $\pm$ 0.443 & 5.38 $\pm$ 0.470 & 5.35 $\pm$ 0.463 & 5.33 $\pm$ 0.452 \\ +\cline{1-8} +\multirow[c]{2}{*}{Ridge Regression} & DDPM & 8.24 $\pm$ 2.392 & 8.24 $\pm$ 2.392 & 8.31 $\pm$ 2.335 & 8.25 $\pm$ 2.388 & 7.79 $\pm$ 1.599 & 7.79 $\pm$ 1.599 \\ + & SP-P & 5.57 $\pm$ 0.648 & 5.43 $\pm$ 0.432 & 5.35 $\pm$ 0.445 & 5.39 $\pm$ 0.472 & 5.35 $\pm$ 0.465 & 5.33 $\pm$ 0.454 \\ +\cline{1-8} +\multirow[c]{2}{*}{1-NN} & DDPM & 9.01 $\pm$ 0.621 & 9.01 $\pm$ 0.621 & 8.57 $\pm$ 0.621 & 8.27 $\pm$ 0.123 & 7.72 $\pm$ 0.434 & 7.72 $\pm$ 0.434 \\ + & SP-P & 9.52 $\pm$ 0.890 & 7.35 $\pm$ 0.764 & 6.21 $\pm$ 0.482 & 5.66 $\pm$ 0.514 & 5.30 $\pm$ 0.439 & 5.13 $\pm$ 0.479 \\ +\cline{1-8} +\multirow[c]{2}{*}{5-NN} & DDPM & 5.76 $\pm$ 0.457 & 5.76 $\pm$ 0.457 & 5.71 $\pm$ 0.399 & 5.66 $\pm$ 0.493 & 5.56 $\pm$ 0.491 & 5.56 $\pm$ 0.491 \\ + & SP-P & 6.32 $\pm$ 0.932 & 5.75 $\pm$ 0.704 & 5.42 $\pm$ 0.550 & 5.26 $\pm$ 0.539 & 5.18 $\pm$ 0.523 & 5.14 $\pm$ 0.513 \\ +\cline{1-8} +\multirow[c]{2}{*}{Decision Tree} & DDPM & 9.17 $\pm$ 0.199 & 8.79 $\pm$ 0.289 & 8.19 $\pm$ 0.741 & 7.81 $\pm$ 0.509 & 7.17 $\pm$ 0.174 & 7.17 $\pm$ 0.194 \\ + & SP-P & 9.37 $\pm$ 0.523 & 7.38 $\pm$ 0.418 & 6.05 $\pm$ 0.237 & 5.43 $\pm$ 0.165 & 5.07 $\pm$ 0.274 & 4.95 $\pm$ 0.348 \\ +\cline{1-8} +\multirow[c]{2}{*}{Random Forest} & DDPM & 5.36 $\pm$ 0.372 & 5.34 $\pm$ 0.405 & 5.24 $\pm$ 0.361 & 5.21 $\pm$ 0.399 & 5.14 $\pm$ 0.360 & 5.13 $\pm$ 0.361 \\ + & SP-P & 5.76 $\pm$ 0.378 & 5.40 $\pm$ 0.282 & 5.12 $\pm$ 0.268 & 4.96 $\pm$ 0.253 & 4.87 $\pm$ 0.276 & 4.85 $\pm$ 0.318 \\ +\cline{1-8} +\multirow[c]{2}{*}{Gradient Boosting} & DDPM & 5.47 $\pm$ 0.506 & 5.47 $\pm$ 0.506 & 5.47 $\pm$ 0.503 & 5.45 $\pm$ 0.526 & 5.39 $\pm$ 0.509 & 5.39 $\pm$ 0.508 \\ + & SP-P & 5.45 $\pm$ 0.336 & 5.25 $\pm$ 0.301 & 5.08 $\pm$ 0.294 & 4.99 $\pm$ 0.329 & 4.96 $\pm$ 0.324 & 4.96 $\pm$ 0.363 \\ +\cline{1-8} +\multirow[c]{2}{*}{MLP} & DDPM & 5.06 $\pm$ 0.323 & 5.04 $\pm$ 0.335 & 5.00 $\pm$ 0.249 & 5.00 $\pm$ 0.273 & 4.92 $\pm$ 0.267 & 4.92 $\pm$ 0.280 \\ + & SP-P & 6.02 $\pm$ 1.935 & 5.38 $\pm$ 1.075 & 4.96 $\pm$ 0.550 & 4.97 $\pm$ 0.570 & 4.84 $\pm$ 0.399 & 4.81 $\pm$ 0.399 \\ +\cline{1-8} +\multirow[c]{2}{*}{SVM} & DDPM & 5.55 $\pm$ 0.592 & 5.55 $\pm$ 0.592 & 5.55 $\pm$ 0.586 & 5.54 $\pm$ 0.598 & 5.51 $\pm$ 0.610 & 5.51 $\pm$ 0.610 \\ + & SP-P & 5.38 $\pm$ 0.515 & 5.35 $\pm$ 0.546 & 5.28 $\pm$ 0.499 & 5.28 $\pm$ 0.583 & 5.27 $\pm$ 0.604 & 5.26 $\pm$ 0.588 \\ +\cline{1-8} +\bottomrule +\end{tabular} + \end{table} + +\begin{table} + \caption{ + Table of results from the ACS 2018 dataset. The numbers are + the mean MSE $\pm$ standard deviation from 3 repeats. The nans + for linear regression are caused by excluding repeats with extremely large MSE ($\geq 10^6$). 
+ } + \label{table:acs2018-results} + \small + \centering + \begin{tabular}{llllllll} +\toprule + & m & 1 & 2 & 4 & 8 & 16 & 32 \\ +Downstream & Generator & & & & & & \\ +\midrule +\multirow[c]{2}{*}{Linear Regression} & DDPM & 0.87 $\pm$ 0.008 & 0.87 $\pm$ 0.007 & 0.87 $\pm$ 0.005 & 0.87 $\pm$ 0.001 & 0.87 $\pm$ 0.003 & 0.87 $\pm$ 0.003 \\ + & SP-P & 0.78 $\pm$ 0.007 & 0.78 $\pm$ 0.006 & 0.78 $\pm$ 0.007 & 0.78 $\pm$ nan & nan & nan \\ +\cline{1-8} +\multirow[c]{2}{*}{Ridge Regression} & DDPM & 0.87 $\pm$ 0.008 & 0.87 $\pm$ 0.007 & 0.87 $\pm$ 0.005 & 0.87 $\pm$ 0.003 & 0.87 $\pm$ 0.004 & 0.87 $\pm$ 0.003 \\ + & SP-P & 0.78 $\pm$ 0.007 & 0.78 $\pm$ 0.006 & 0.78 $\pm$ 0.007 & 0.78 $\pm$ 0.007 & 0.77 $\pm$ 0.007 & 0.77 $\pm$ 0.006 \\ +\cline{1-8} +\multirow[c]{2}{*}{1-NN} & DDPM & 1.55 $\pm$ 0.022 & 1.26 $\pm$ 0.026 & 1.10 $\pm$ 0.019 & 1.03 $\pm$ 0.014 & 1.00 $\pm$ 0.013 & 0.98 $\pm$ 0.011 \\ + & SP-P & 1.38 $\pm$ 0.015 & 1.04 $\pm$ 0.010 & 0.88 $\pm$ 0.001 & 0.79 $\pm$ 0.004 & 0.75 $\pm$ 0.006 & 0.73 $\pm$ 0.007 \\ +\cline{1-8} +\multirow[c]{2}{*}{5-NN} & DDPM & 0.98 $\pm$ 0.006 & 0.93 $\pm$ 0.009 & 0.90 $\pm$ 0.008 & 0.89 $\pm$ 0.007 & 0.89 $\pm$ 0.004 & 0.88 $\pm$ 0.004 \\ + & SP-P & 0.93 $\pm$ 0.002 & 0.82 $\pm$ 0.005 & 0.78 $\pm$ 0.004 & 0.75 $\pm$ 0.008 & 0.74 $\pm$ 0.008 & 0.73 $\pm$ 0.009 \\ +\cline{1-8} +\multirow[c]{2}{*}{Decision Tree} & DDPM & 1.41 $\pm$ 0.008 & 1.09 $\pm$ 0.005 & 0.95 $\pm$ 0.006 & 0.87 $\pm$ 0.004 & 0.84 $\pm$ 0.006 & 0.82 $\pm$ 0.011 \\ + & SP-P & 1.25 $\pm$ 0.035 & 0.96 $\pm$ 0.023 & 0.82 $\pm$ 0.011 & 0.74 $\pm$ 0.005 & 0.71 $\pm$ 0.003 & 0.69 $\pm$ 0.004 \\ +\cline{1-8} +\multirow[c]{2}{*}{Random Forest} & DDPM & 0.88 $\pm$ 0.010 & 0.82 $\pm$ 0.013 & 0.80 $\pm$ 0.009 & 0.78 $\pm$ 0.009 & 0.78 $\pm$ 0.009 & 0.77 $\pm$ 0.009 \\ + & SP-P & 0.84 $\pm$ 0.010 & 0.76 $\pm$ 0.010 & 0.72 $\pm$ 0.006 & 0.69 $\pm$ 0.005 & 0.68 $\pm$ 0.005 & 0.68 $\pm$ 0.006 \\ +\cline{1-8} +\multirow[c]{2}{*}{Gradient Boosting} & DDPM & 0.74 $\pm$ 0.008 & 0.74 $\pm$ 0.008 & 0.74 $\pm$ 0.006 & 0.74 $\pm$ 0.005 & 0.74 $\pm$ 0.004 & 0.74 $\pm$ 0.004 \\ + & SP-P & 0.68 $\pm$ 0.005 & 0.68 $\pm$ 0.005 & 0.67 $\pm$ 0.005 & 0.67 $\pm$ 0.005 & 0.67 $\pm$ 0.006 & 0.67 $\pm$ 0.006 \\ +\cline{1-8} +\multirow[c]{2}{*}{MLP} & DDPM & 0.82 $\pm$ 0.019 & 0.77 $\pm$ 0.018 & 0.76 $\pm$ 0.017 & 0.75 $\pm$ 0.007 & 0.74 $\pm$ 0.005 & 0.74 $\pm$ 0.004 \\ + & SP-P & 0.76 $\pm$ 0.006 & 0.71 $\pm$ 0.003 & 0.68 $\pm$ 0.008 & 0.67 $\pm$ 0.007 & 0.67 $\pm$ 0.007 & 0.66 $\pm$ 0.006 \\ +\cline{1-8} +\multirow[c]{2}{*}{SVM} & DDPM & 0.79 $\pm$ 0.004 & 0.78 $\pm$ 0.002 & 0.77 $\pm$ 0.002 & 0.78 $\pm$ 0.002 & 0.78 $\pm$ 0.003 & 0.78 $\pm$ 0.002 \\ + & SP-P & 0.71 $\pm$ 0.005 & 0.69 $\pm$ 0.003 & 0.69 $\pm$ 0.002 & 0.69 $\pm$ 0.004 & 0.69 $\pm$ 0.004 & 0.68 $\pm$ 0.004 \\ +\cline{1-8} +\bottomrule +\end{tabular} + \end{table} + +\begin{table} + \caption{ + Table of results from the California Housing dataset. The numbers are + the mean MSE $\pm$ standard deviation from 3 repeats. 
+ } + \label{table:california-housing-results} + \small + \centering + \begin{tabular}{llllllll} +\toprule + & m & 1 & 2 & 4 & 8 & 16 & 32 \\ +Downstream & Generator & & & & & & \\ +\midrule +\multirow[c]{2}{*}{Linear Regression} & DDPM & 0.12 $\pm$ 0.004 & 0.12 $\pm$ 0.004 & 0.12 $\pm$ 0.003 & 0.12 $\pm$ 0.003 & 0.12 $\pm$ 0.003 & 0.12 $\pm$ 0.003 \\ + & SP-P & 0.12 $\pm$ 0.001 & 0.11 $\pm$ 0.002 & 0.11 $\pm$ 0.002 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.002 & 0.11 $\pm$ 0.002 \\ +\cline{1-8} +\multirow[c]{2}{*}{Ridge Regression} & DDPM & 0.12 $\pm$ 0.004 & 0.12 $\pm$ 0.004 & 0.12 $\pm$ 0.003 & 0.12 $\pm$ 0.003 & 0.12 $\pm$ 0.003 & 0.12 $\pm$ 0.003 \\ + & SP-P & 0.12 $\pm$ 0.001 & 0.11 $\pm$ 0.002 & 0.11 $\pm$ 0.002 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.002 & 0.11 $\pm$ 0.002 \\ +\cline{1-8} +\multirow[c]{2}{*}{1-NN} & DDPM & 0.14 $\pm$ 0.008 & 0.14 $\pm$ 0.008 & 0.12 $\pm$ 0.006 & 0.13 $\pm$ 0.007 & 0.12 $\pm$ 0.004 & 0.12 $\pm$ 0.004 \\ + & SP-P & 0.16 $\pm$ 0.002 & 0.12 $\pm$ 0.002 & 0.09 $\pm$ 0.002 & 0.08 $\pm$ 0.001 & 0.08 $\pm$ 0.002 & 0.08 $\pm$ 0.001 \\ +\cline{1-8} +\multirow[c]{2}{*}{5-NN} & DDPM & 0.09 $\pm$ 0.002 & 0.09 $\pm$ 0.002 & 0.09 $\pm$ 0.002 & 0.09 $\pm$ 0.002 & 0.09 $\pm$ 0.002 & 0.09 $\pm$ 0.002 \\ + & SP-P & 0.10 $\pm$ 0.003 & 0.09 $\pm$ 0.001 & 0.08 $\pm$ 0.001 & 0.08 $\pm$ 0.001 & 0.08 $\pm$ 0.001 & 0.08 $\pm$ 0.001 \\ +\cline{1-8} +\multirow[c]{2}{*}{Decision Tree} & DDPM & 0.13 $\pm$ 0.003 & 0.13 $\pm$ 0.004 & 0.10 $\pm$ 0.004 & 0.11 $\pm$ 0.006 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.002 \\ + & SP-P & 0.15 $\pm$ 0.004 & 0.10 $\pm$ 0.004 & 0.08 $\pm$ 0.001 & 0.07 $\pm$ 0.001 & 0.07 $\pm$ 0.001 & 0.06 $\pm$ 0.001 \\ +\cline{1-8} +\multirow[c]{2}{*}{Random Forest} & DDPM & 0.07 $\pm$ 0.002 & 0.07 $\pm$ 0.002 & 0.07 $\pm$ 0.001 & 0.07 $\pm$ 0.001 & 0.07 $\pm$ 0.001 & 0.07 $\pm$ 0.001 \\ + & SP-P & 0.08 $\pm$ 0.002 & 0.07 $\pm$ 0.003 & 0.07 $\pm$ 0.002 & 0.06 $\pm$ 0.001 & 0.06 $\pm$ 0.001 & 0.06 $\pm$ 0.001 \\ +\cline{1-8} +\multirow[c]{2}{*}{Gradient Boosting} & DDPM & 0.07 $\pm$ 0.002 & 0.07 $\pm$ 0.002 & 0.07 $\pm$ 0.002 & 0.07 $\pm$ 0.002 & 0.07 $\pm$ 0.002 & 0.07 $\pm$ 0.002 \\ + & SP-P & 0.08 $\pm$ 0.002 & 0.07 $\pm$ 0.002 & 0.07 $\pm$ 0.001 & 0.07 $\pm$ 0.001 & 0.07 $\pm$ 0.001 & 0.07 $\pm$ 0.001 \\ +\cline{1-8} +\multirow[c]{2}{*}{MLP} & DDPM & 0.07 $\pm$ 0.000 & 0.07 $\pm$ 0.001 & 0.07 $\pm$ 0.001 & 0.07 $\pm$ 0.001 & 0.06 $\pm$ 0.001 & 0.06 $\pm$ 0.001 \\ + & SP-P & 0.08 $\pm$ 0.001 & 0.07 $\pm$ 0.002 & 0.07 $\pm$ 0.002 & 0.07 $\pm$ 0.001 & 0.07 $\pm$ 0.001 & 0.07 $\pm$ 0.001 \\ +\cline{1-8} +\multirow[c]{2}{*}{SVM} & DDPM & 0.07 $\pm$ 0.001 & 0.07 $\pm$ 0.001 & 0.07 $\pm$ 0.001 & 0.07 $\pm$ 0.001 & 0.07 $\pm$ 0.001 & 0.07 $\pm$ 0.001 \\ + & SP-P & 0.08 $\pm$ 0.001 & 0.07 $\pm$ 0.001 & 0.07 $\pm$ 0.001 & 0.07 $\pm$ 0.001 & 0.07 $\pm$ 0.001 & 0.07 $\pm$ 0.001 \\ +\cline{1-8} +\bottomrule +\end{tabular} + \end{table} + +\begin{table} + \caption{ + Table of results from the Insurance dataset. The numbers are + the mean MSE $\pm$ standard deviation from 3 repeats. 
+ } + \label{table:insurance-results} + \small + \centering + \begin{tabular}{llllllll} +\toprule + & m & 1 & 2 & 4 & 8 & 16 & 32 \\ +Downstream & Generator & & & & & & \\ +\midrule +\multirow[c]{2}{*}{Linear Regression} & DDPM & 0.25 $\pm$ 0.037 & 0.24 $\pm$ 0.039 & 0.23 $\pm$ 0.030 & 0.23 $\pm$ 0.023 & 0.23 $\pm$ 0.020 & 0.23 $\pm$ 0.021 \\ + & SP-P & 0.20 $\pm$ 0.009 & 0.20 $\pm$ 0.009 & 0.20 $\pm$ 0.008 & 0.20 $\pm$ 0.006 & 0.20 $\pm$ 0.006 & 0.20 $\pm$ 0.006 \\ +\cline{1-8} +\multirow[c]{2}{*}{Ridge Regression} & DDPM & 0.25 $\pm$ 0.037 & 0.24 $\pm$ 0.039 & 0.23 $\pm$ 0.031 & 0.23 $\pm$ 0.023 & 0.23 $\pm$ 0.021 & 0.23 $\pm$ 0.021 \\ + & SP-P & 0.20 $\pm$ 0.009 & 0.20 $\pm$ 0.010 & 0.20 $\pm$ 0.008 & 0.20 $\pm$ 0.007 & 0.20 $\pm$ 0.006 & 0.20 $\pm$ 0.006 \\ +\cline{1-8} +\multirow[c]{2}{*}{1-NN} & DDPM & 0.40 $\pm$ 0.076 & 0.37 $\pm$ 0.104 & 0.34 $\pm$ 0.056 & 0.33 $\pm$ 0.042 & 0.34 $\pm$ 0.040 & 0.34 $\pm$ 0.040 \\ + & SP-P & 0.29 $\pm$ 0.063 & 0.20 $\pm$ 0.012 & 0.17 $\pm$ 0.015 & 0.15 $\pm$ 0.009 & 0.15 $\pm$ 0.006 & 0.14 $\pm$ 0.002 \\ +\cline{1-8} +\multirow[c]{2}{*}{5-NN} & DDPM & 0.23 $\pm$ 0.031 & 0.21 $\pm$ 0.031 & 0.21 $\pm$ 0.017 & 0.20 $\pm$ 0.009 & 0.20 $\pm$ 0.014 & 0.21 $\pm$ 0.017 \\ + & SP-P & 0.19 $\pm$ 0.014 & 0.16 $\pm$ 0.007 & 0.15 $\pm$ 0.008 & 0.15 $\pm$ 0.005 & 0.15 $\pm$ 0.005 & 0.14 $\pm$ 0.003 \\ +\cline{1-8} +\multirow[c]{2}{*}{Decision Tree} & DDPM & 0.38 $\pm$ 0.074 & 0.34 $\pm$ 0.110 & 0.32 $\pm$ 0.075 & 0.31 $\pm$ 0.071 & 0.32 $\pm$ 0.070 & 0.33 $\pm$ 0.069 \\ + & SP-P & 0.26 $\pm$ 0.011 & 0.21 $\pm$ 0.012 & 0.17 $\pm$ 0.003 & 0.16 $\pm$ 0.002 & 0.15 $\pm$ 0.007 & 0.14 $\pm$ 0.007 \\ +\cline{1-8} +\multirow[c]{2}{*}{Random Forest} & DDPM & 0.22 $\pm$ 0.015 & 0.20 $\pm$ 0.028 & 0.19 $\pm$ 0.015 & 0.19 $\pm$ 0.013 & 0.19 $\pm$ 0.009 & 0.19 $\pm$ 0.010 \\ + & SP-P & 0.18 $\pm$ 0.023 & 0.16 $\pm$ 0.019 & 0.15 $\pm$ 0.007 & 0.14 $\pm$ 0.006 & 0.14 $\pm$ 0.008 & 0.14 $\pm$ 0.007 \\ +\cline{1-8} +\multirow[c]{2}{*}{Gradient Boosting} & DDPM & 0.18 $\pm$ 0.017 & 0.17 $\pm$ 0.011 & 0.17 $\pm$ 0.004 & 0.17 $\pm$ 0.005 & 0.17 $\pm$ 0.013 & 0.17 $\pm$ 0.016 \\ + & SP-P & 0.16 $\pm$ 0.009 & 0.15 $\pm$ 0.009 & 0.14 $\pm$ 0.007 & 0.14 $\pm$ 0.009 & 0.14 $\pm$ 0.010 & 0.13 $\pm$ 0.009 \\ +\cline{1-8} +\multirow[c]{2}{*}{MLP} & DDPM & 0.23 $\pm$ 0.010 & 0.23 $\pm$ 0.008 & 0.21 $\pm$ 0.008 & 0.20 $\pm$ 0.009 & 0.20 $\pm$ 0.013 & 0.20 $\pm$ 0.014 \\ + & SP-P & 0.16 $\pm$ 0.009 & 0.16 $\pm$ 0.011 & 0.15 $\pm$ 0.009 & 0.15 $\pm$ 0.009 & 0.15 $\pm$ 0.011 & 0.15 $\pm$ 0.012 \\ +\cline{1-8} +\multirow[c]{2}{*}{SVM} & DDPM & 0.18 $\pm$ 0.018 & 0.17 $\pm$ 0.023 & 0.16 $\pm$ 0.015 & 0.16 $\pm$ 0.010 & 0.16 $\pm$ 0.004 & 0.16 $\pm$ 0.004 \\ + & SP-P & 0.15 $\pm$ 0.015 & 0.14 $\pm$ 0.014 & 0.14 $\pm$ 0.015 & 0.14 $\pm$ 0.015 & 0.14 $\pm$ 0.015 & 0.14 $\pm$ 0.014 \\ +\cline{1-8} +\bottomrule +\end{tabular} + \end{table} + +\begin{table} + \caption{ + Table of results from the Adult dataset. The numbers are + the mean Brier score $\pm$ standard deviation from 3 repeats. + } + \label{table:adult-brier-results} + \scriptsize + \centering + \begin{tabular}{llllllll} +\toprule + & m & 1 & 2 & 4 & 8 & 16 & 32 \\ +Downstream & Generator & & & & & & \\ +\midrule +\multirow[c]{4}{*}{Logistic Regression} & DDPM - Log Prob. Avg. & 0.12 $\pm$ 0.001 & 0.12 $\pm$ 0.002 & 0.12 $\pm$ 0.001 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.000 & 0.11 $\pm$ 0.000 \\ + & DDPM - Prob. Avg. 
& 0.12 $\pm$ 0.001 & 0.11 $\pm$ 0.002 & 0.12 $\pm$ 0.001 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.000 & 0.11 $\pm$ 0.000 \\ + & SP-P - Log Prob. Avg. & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 \\ + & SP-P - Prob. Avg. & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 \\ +\cline{1-8} +\multirow[c]{4}{*}{1-NN} & DDPM - Log Prob. Avg. & 0.24 $\pm$ 0.005 & 0.19 $\pm$ 0.004 & 0.19 $\pm$ 0.006 & 0.18 $\pm$ 0.002 & 0.18 $\pm$ 0.003 & 0.17 $\pm$ 0.003 \\ + & DDPM - Prob. Avg. & 0.24 $\pm$ 0.005 & 0.19 $\pm$ 0.004 & 0.16 $\pm$ 0.005 & 0.15 $\pm$ 0.002 & 0.14 $\pm$ 0.002 & 0.14 $\pm$ 0.002 \\ + & SP-P - Log Prob. Avg. & 0.21 $\pm$ 0.005 & 0.16 $\pm$ 0.003 & 0.15 $\pm$ 0.003 & 0.15 $\pm$ 0.005 & 0.14 $\pm$ 0.003 & 0.14 $\pm$ 0.003 \\ + & SP-P - Prob. Avg. & 0.21 $\pm$ 0.005 & 0.16 $\pm$ 0.003 & 0.13 $\pm$ 0.002 & 0.12 $\pm$ 0.002 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.001 \\ +\cline{1-8} +\multirow[c]{4}{*}{5-NN} & DDPM - Log Prob. Avg. & 0.14 $\pm$ 0.003 & 0.14 $\pm$ 0.001 & 0.14 $\pm$ 0.002 & 0.14 $\pm$ 0.002 & 0.13 $\pm$ 0.001 & 0.13 $\pm$ 0.000 \\ + & DDPM - Prob. Avg. & 0.14 $\pm$ 0.003 & 0.13 $\pm$ 0.001 & 0.13 $\pm$ 0.001 & 0.13 $\pm$ 0.001 & 0.13 $\pm$ 0.000 & 0.12 $\pm$ 0.000 \\ + & SP-P - Log Prob. Avg. & 0.14 $\pm$ 0.003 & 0.14 $\pm$ 0.004 & 0.14 $\pm$ 0.003 & 0.13 $\pm$ 0.003 & 0.13 $\pm$ 0.001 & 0.13 $\pm$ 0.002 \\ + & SP-P - Prob. Avg. & 0.14 $\pm$ 0.003 & 0.13 $\pm$ 0.003 & 0.12 $\pm$ 0.002 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.001 \\ +\cline{1-8} +\multirow[c]{4}{*}{Decision Tree} & DDPM - Log Prob. Avg. & 0.21 $\pm$ 0.000 & 0.17 $\pm$ 0.001 & 0.16 $\pm$ 0.001 & 0.15 $\pm$ 0.002 & 0.15 $\pm$ 0.000 & 0.14 $\pm$ 0.001 \\ + & DDPM - Prob. Avg. & 0.21 $\pm$ 0.000 & 0.16 $\pm$ 0.001 & 0.14 $\pm$ 0.001 & 0.13 $\pm$ 0.001 & 0.12 $\pm$ 0.000 & 0.12 $\pm$ 0.000 \\ + & SP-P - Log Prob. Avg. & 0.18 $\pm$ 0.003 & 0.15 $\pm$ 0.001 & 0.14 $\pm$ 0.002 & 0.13 $\pm$ 0.002 & 0.12 $\pm$ 0.002 & 0.12 $\pm$ 0.001 \\ + & SP-P - Prob. Avg. & 0.18 $\pm$ 0.003 & 0.14 $\pm$ 0.001 & 0.12 $\pm$ 0.002 & 0.11 $\pm$ 0.001 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 \\ +\cline{1-8} +\multirow[c]{4}{*}{Random Forest} & DDPM - Log Prob. Avg. & 0.12 $\pm$ 0.001 & 0.12 $\pm$ 0.001 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.001 \\ + & DDPM - Prob. Avg. & 0.12 $\pm$ 0.001 & 0.12 $\pm$ 0.001 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.001 \\ + & SP-P - Log Prob. Avg. & 0.12 $\pm$ 0.002 & 0.11 $\pm$ 0.002 & 0.11 $\pm$ 0.002 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 \\ + & SP-P - Prob. Avg. & 0.12 $\pm$ 0.002 & 0.11 $\pm$ 0.002 & 0.10 $\pm$ 0.002 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 \\ +\cline{1-8} +\multirow[c]{4}{*}{Gradient Boosting} & DDPM - Log Prob. Avg. & 0.11 $\pm$ 0.002 & 0.11 $\pm$ 0.000 & 0.11 $\pm$ 0.000 & 0.11 $\pm$ 0.000 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.001 \\ + & DDPM - Prob. Avg. & 0.11 $\pm$ 0.002 & 0.11 $\pm$ 0.000 & 0.11 $\pm$ 0.000 & 0.11 $\pm$ 0.000 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.001 \\ + & SP-P - Log Prob. Avg. & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 \\ + & SP-P - Prob. Avg. & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 \\ +\cline{1-8} +\multirow[c]{4}{*}{MLP} & DDPM - Log Prob. Avg. 
& 0.14 $\pm$ 0.006 & 0.13 $\pm$ 0.002 & 0.12 $\pm$ 0.002 & 0.12 $\pm$ 0.002 & 0.12 $\pm$ 0.001 & 0.12 $\pm$ 0.001 \\ + & DDPM - Prob. Avg. & 0.14 $\pm$ 0.006 & 0.12 $\pm$ 0.002 & 0.12 $\pm$ 0.002 & 0.12 $\pm$ 0.002 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.001 \\ + & SP-P - Log Prob. Avg. & 0.13 $\pm$ 0.003 & 0.12 $\pm$ 0.002 & 0.11 $\pm$ 0.002 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.002 & 0.11 $\pm$ 0.001 \\ + & SP-P - Prob. Avg. & 0.13 $\pm$ 0.003 & 0.11 $\pm$ 0.002 & 0.11 $\pm$ 0.001 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 & 0.10 $\pm$ 0.001 \\ +\cline{1-8} +\multirow[c]{4}{*}{SVM} & DDPM - Log Prob. Avg. & 0.12 $\pm$ 0.003 & 0.12 $\pm$ 0.001 & 0.12 $\pm$ 0.001 & 0.12 $\pm$ 0.000 & 0.12 $\pm$ 0.000 & 0.12 $\pm$ 0.001 \\ + & DDPM - Prob. Avg. & 0.12 $\pm$ 0.003 & 0.12 $\pm$ 0.001 & 0.12 $\pm$ 0.001 & 0.12 $\pm$ 0.000 & 0.12 $\pm$ 0.000 & 0.12 $\pm$ 0.001 \\ + & SP-P - Log Prob. Avg. & 0.12 $\pm$ 0.001 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.001 \\ + & SP-P - Prob. Avg. & 0.12 $\pm$ 0.001 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.001 & 0.11 $\pm$ 0.001 \\ +\cline{1-8} +\bottomrule +\end{tabular} + \end{table} + +\begin{table} + \caption{ + Table of results from the Breast Cancer dataset. The numbers are + the mean Brier score $\pm$ standard deviation from 3 repeats. + } + \label{table:breast-cancer-brier-results} + \scriptsize + \centering + \begin{tabular}{llllllll} +\toprule + & m & 1 & 2 & 4 & 8 & 16 & 32 \\ +Downstream & Generator & & & & & & \\ +\midrule +\multirow[c]{4}{*}{Logistic Regression} & DDPM - Log Prob. Avg. & 0.11 $\pm$ 0.056 & 0.10 $\pm$ 0.071 & 0.10 $\pm$ 0.071 & 0.09 $\pm$ 0.075 & 0.09 $\pm$ 0.078 & 0.09 $\pm$ 0.077 \\ + & DDPM - Prob. Avg. & 0.11 $\pm$ 0.056 & 0.10 $\pm$ 0.069 & 0.10 $\pm$ 0.069 & 0.10 $\pm$ 0.074 & 0.10 $\pm$ 0.077 & 0.10 $\pm$ 0.076 \\ + & SP-P - Log Prob. Avg. & 0.04 $\pm$ 0.009 & 0.03 $\pm$ 0.003 & 0.03 $\pm$ 0.004 & 0.02 $\pm$ 0.004 & 0.02 $\pm$ 0.003 & 0.02 $\pm$ 0.003 \\ + & SP-P - Prob. Avg. & 0.04 $\pm$ 0.009 & 0.03 $\pm$ 0.003 & 0.03 $\pm$ 0.003 & 0.03 $\pm$ 0.003 & 0.03 $\pm$ 0.004 & 0.02 $\pm$ 0.004 \\ +\cline{1-8} +\multirow[c]{4}{*}{1-NN} & DDPM - Log Prob. Avg. & 0.12 $\pm$ 0.075 & 0.09 $\pm$ 0.054 & 0.09 $\pm$ 0.054 & 0.10 $\pm$ 0.043 & 0.10 $\pm$ 0.043 & 0.10 $\pm$ 0.043 \\ + & DDPM - Prob. Avg. & 0.12 $\pm$ 0.075 & 0.09 $\pm$ 0.054 & 0.09 $\pm$ 0.054 & 0.09 $\pm$ 0.049 & 0.09 $\pm$ 0.048 & 0.09 $\pm$ 0.047 \\ + & SP-P - Log Prob. Avg. & 0.11 $\pm$ 0.045 & 0.07 $\pm$ 0.015 & 0.05 $\pm$ 0.013 & 0.04 $\pm$ 0.002 & 0.04 $\pm$ 0.003 & 0.03 $\pm$ 0.003 \\ + & SP-P - Prob. Avg. & 0.11 $\pm$ 0.045 & 0.07 $\pm$ 0.015 & 0.05 $\pm$ 0.011 & 0.04 $\pm$ 0.004 & 0.04 $\pm$ 0.000 & 0.03 $\pm$ 0.001 \\ +\cline{1-8} +\multirow[c]{4}{*}{5-NN} & DDPM - Log Prob. Avg. & 0.14 $\pm$ 0.086 & 0.13 $\pm$ 0.087 & 0.13 $\pm$ 0.087 & 0.13 $\pm$ 0.081 & 0.12 $\pm$ 0.070 & 0.12 $\pm$ 0.077 \\ + & DDPM - Prob. Avg. & 0.14 $\pm$ 0.086 & 0.12 $\pm$ 0.068 & 0.12 $\pm$ 0.068 & 0.11 $\pm$ 0.061 & 0.11 $\pm$ 0.058 & 0.11 $\pm$ 0.060 \\ + & SP-P - Log Prob. Avg. & 0.05 $\pm$ 0.012 & 0.04 $\pm$ 0.005 & 0.03 $\pm$ 0.007 & 0.03 $\pm$ 0.003 & 0.03 $\pm$ 0.005 & 0.02 $\pm$ 0.004 \\ + & SP-P - Prob. Avg. & 0.05 $\pm$ 0.012 & 0.04 $\pm$ 0.003 & 0.04 $\pm$ 0.002 & 0.03 $\pm$ 0.002 & 0.03 $\pm$ 0.001 & 0.03 $\pm$ 0.000 \\ +\cline{1-8} +\multirow[c]{4}{*}{Decision Tree} & DDPM - Log Prob. Avg. 
& 0.14 $\pm$ 0.083 & 0.09 $\pm$ 0.026 & 0.09 $\pm$ 0.026 & 0.10 $\pm$ 0.033 & 0.10 $\pm$ 0.033 & 0.10 $\pm$ 0.031 \\ + & DDPM - Prob. Avg. & 0.14 $\pm$ 0.083 & 0.09 $\pm$ 0.026 & 0.09 $\pm$ 0.025 & 0.08 $\pm$ 0.020 & 0.09 $\pm$ 0.025 & 0.09 $\pm$ 0.023 \\ + & SP-P - Log Prob. Avg. & 0.10 $\pm$ 0.028 & 0.05 $\pm$ 0.012 & 0.04 $\pm$ 0.009 & 0.03 $\pm$ 0.004 & 0.03 $\pm$ 0.009 & 0.03 $\pm$ 0.006 \\ + & SP-P - Prob. Avg. & 0.10 $\pm$ 0.028 & 0.05 $\pm$ 0.012 & 0.04 $\pm$ 0.006 & 0.03 $\pm$ 0.003 & 0.03 $\pm$ 0.006 & 0.03 $\pm$ 0.004 \\ +\cline{1-8} +\multirow[c]{4}{*}{Random Forest} & DDPM - Log Prob. Avg. & 0.08 $\pm$ 0.033 & 0.07 $\pm$ 0.018 & 0.07 $\pm$ 0.019 & 0.07 $\pm$ 0.019 & 0.07 $\pm$ 0.022 & 0.07 $\pm$ 0.022 \\ + & DDPM - Prob. Avg. & 0.08 $\pm$ 0.033 & 0.07 $\pm$ 0.018 & 0.07 $\pm$ 0.019 & 0.07 $\pm$ 0.019 & 0.07 $\pm$ 0.022 & 0.07 $\pm$ 0.022 \\ + & SP-P - Log Prob. Avg. & 0.04 $\pm$ 0.004 & 0.03 $\pm$ 0.008 & 0.03 $\pm$ 0.008 & 0.03 $\pm$ 0.007 & 0.03 $\pm$ 0.006 & 0.03 $\pm$ 0.006 \\ + & SP-P - Prob. Avg. & 0.04 $\pm$ 0.004 & 0.03 $\pm$ 0.007 & 0.03 $\pm$ 0.007 & 0.03 $\pm$ 0.006 & 0.03 $\pm$ 0.006 & 0.03 $\pm$ 0.006 \\ +\cline{1-8} +\multirow[c]{4}{*}{Gradient Boosting} & DDPM - Log Prob. Avg. & 0.13 $\pm$ 0.089 & 0.08 $\pm$ 0.031 & 0.09 $\pm$ 0.034 & 0.08 $\pm$ 0.029 & 0.08 $\pm$ 0.033 & 0.08 $\pm$ 0.032 \\ + & DDPM - Prob. Avg. & 0.13 $\pm$ 0.089 & 0.09 $\pm$ 0.031 & 0.09 $\pm$ 0.033 & 0.08 $\pm$ 0.029 & 0.08 $\pm$ 0.031 & 0.08 $\pm$ 0.030 \\ + & SP-P - Log Prob. Avg. & 0.04 $\pm$ 0.002 & 0.04 $\pm$ 0.004 & 0.03 $\pm$ 0.003 & 0.03 $\pm$ 0.004 & 0.03 $\pm$ 0.006 & 0.03 $\pm$ 0.006 \\ + & SP-P - Prob. Avg. & 0.04 $\pm$ 0.002 & 0.04 $\pm$ 0.004 & 0.03 $\pm$ 0.002 & 0.03 $\pm$ 0.002 & 0.03 $\pm$ 0.004 & 0.03 $\pm$ 0.005 \\ +\cline{1-8} +\multirow[c]{4}{*}{MLP} & DDPM - Log Prob. Avg. & 0.05 $\pm$ 0.015 & 0.05 $\pm$ 0.008 & 0.04 $\pm$ 0.007 & 0.05 $\pm$ 0.010 & 0.05 $\pm$ 0.016 & 0.05 $\pm$ 0.017 \\ + & DDPM - Prob. Avg. & 0.05 $\pm$ 0.015 & 0.05 $\pm$ 0.007 & 0.05 $\pm$ 0.005 & 0.05 $\pm$ 0.009 & 0.05 $\pm$ 0.015 & 0.05 $\pm$ 0.016 \\ + & SP-P - Log Prob. Avg. & 0.05 $\pm$ 0.012 & 0.04 $\pm$ 0.010 & 0.03 $\pm$ 0.006 & 0.03 $\pm$ 0.001 & 0.02 $\pm$ 0.007 & 0.02 $\pm$ 0.006 \\ + & SP-P - Prob. Avg. & 0.05 $\pm$ 0.012 & 0.04 $\pm$ 0.009 & 0.03 $\pm$ 0.003 & 0.03 $\pm$ 0.002 & 0.03 $\pm$ 0.004 & 0.02 $\pm$ 0.003 \\ +\cline{1-8} +\multirow[c]{4}{*}{SVM} & DDPM - Log Prob. Avg. & 0.11 $\pm$ 0.051 & 0.11 $\pm$ 0.053 & 0.11 $\pm$ 0.053 & 0.11 $\pm$ 0.053 & 0.11 $\pm$ 0.051 & 0.11 $\pm$ 0.051 \\ + & DDPM - Prob. Avg. & 0.11 $\pm$ 0.051 & 0.11 $\pm$ 0.053 & 0.11 $\pm$ 0.053 & 0.11 $\pm$ 0.053 & 0.11 $\pm$ 0.051 & 0.11 $\pm$ 0.051 \\ + & SP-P - Log Prob. Avg. & 0.04 $\pm$ 0.005 & 0.04 $\pm$ 0.007 & 0.03 $\pm$ 0.005 & 0.03 $\pm$ 0.003 & 0.03 $\pm$ 0.003 & 0.03 $\pm$ 0.003 \\ + & SP-P - Prob. Avg. & 0.04 $\pm$ 0.005 & 0.04 $\pm$ 0.006 & 0.03 $\pm$ 0.004 & 0.03 $\pm$ 0.003 & 0.03 $\pm$ 0.004 & 0.03 $\pm$ 0.003 \\ +\cline{1-8} +\bottomrule +\end{tabular} + \end{table} + +\begin{table} + \caption{ + Table of results from the German Credit dataset. The numbers are + the mean Brier score $\pm$ standard deviation from 3 repeats. + } + \label{table:german-credit-brier-results} + \scriptsize + \centering + \begin{tabular}{llllllll} +\toprule + & m & 1 & 2 & 4 & 8 & 16 & 32 \\ +Downstream & Generator & & & & & & \\ +\midrule +\multirow[c]{4}{*}{Logistic Regression} & DDPM - Log Prob. Avg. 
& 0.23 $\pm$ 0.017 & 0.22 $\pm$ 0.008 & 0.21 $\pm$ 0.009 & 0.21 $\pm$ 0.014 & 0.22 $\pm$ 0.013 & 0.21 $\pm$ 0.013 \\ + & DDPM - Prob. Avg. & 0.23 $\pm$ 0.017 & 0.22 $\pm$ 0.006 & 0.21 $\pm$ 0.007 & 0.21 $\pm$ 0.011 & 0.21 $\pm$ 0.010 & 0.21 $\pm$ 0.011 \\ + & SP-P - Log Prob. Avg. & 0.20 $\pm$ 0.003 & 0.18 $\pm$ 0.011 & 0.17 $\pm$ 0.013 & 0.17 $\pm$ 0.011 & 0.17 $\pm$ 0.009 & 0.16 $\pm$ 0.010 \\ + & SP-P - Prob. Avg. & 0.20 $\pm$ 0.003 & 0.18 $\pm$ 0.012 & 0.17 $\pm$ 0.013 & 0.17 $\pm$ 0.010 & 0.17 $\pm$ 0.008 & 0.16 $\pm$ 0.008 \\ +\cline{1-8} +\multirow[c]{4}{*}{1-NN} & DDPM - Log Prob. Avg. & 0.41 $\pm$ 0.030 & 0.33 $\pm$ 0.027 & 0.35 $\pm$ 0.017 & 0.34 $\pm$ 0.021 & 0.32 $\pm$ 0.014 & 0.32 $\pm$ 0.018 \\ + & DDPM - Prob. Avg. & 0.41 $\pm$ 0.030 & 0.33 $\pm$ 0.027 & 0.29 $\pm$ 0.007 & 0.26 $\pm$ 0.010 & 0.25 $\pm$ 0.002 & 0.25 $\pm$ 0.005 \\ + & SP-P - Log Prob. Avg. & 0.37 $\pm$ 0.037 & 0.28 $\pm$ 0.033 & 0.27 $\pm$ 0.026 & 0.26 $\pm$ 0.024 & 0.24 $\pm$ 0.031 & 0.23 $\pm$ 0.028 \\ + & SP-P - Prob. Avg. & 0.37 $\pm$ 0.037 & 0.28 $\pm$ 0.033 & 0.23 $\pm$ 0.015 & 0.20 $\pm$ 0.011 & 0.19 $\pm$ 0.013 & 0.18 $\pm$ 0.013 \\ +\cline{1-8} +\multirow[c]{4}{*}{5-NN} & DDPM - Log Prob. Avg. & 0.24 $\pm$ 0.022 & 0.25 $\pm$ 0.013 & 0.24 $\pm$ 0.019 & 0.25 $\pm$ 0.025 & 0.25 $\pm$ 0.028 & 0.25 $\pm$ 0.029 \\ + & DDPM - Prob. Avg. & 0.24 $\pm$ 0.022 & 0.23 $\pm$ 0.011 & 0.22 $\pm$ 0.010 & 0.22 $\pm$ 0.012 & 0.22 $\pm$ 0.013 & 0.22 $\pm$ 0.012 \\ + & SP-P - Log Prob. Avg. & 0.23 $\pm$ 0.011 & 0.22 $\pm$ 0.019 & 0.21 $\pm$ 0.012 & 0.21 $\pm$ 0.010 & 0.20 $\pm$ 0.007 & 0.20 $\pm$ 0.014 \\ + & SP-P - Prob. Avg. & 0.23 $\pm$ 0.011 & 0.20 $\pm$ 0.016 & 0.19 $\pm$ 0.011 & 0.18 $\pm$ 0.010 & 0.18 $\pm$ 0.007 & 0.18 $\pm$ 0.009 \\ +\cline{1-8} +\multirow[c]{4}{*}{Decision Tree} & DDPM - Log Prob. Avg. & 0.44 $\pm$ 0.048 & 0.32 $\pm$ 0.045 & 0.31 $\pm$ 0.038 & 0.30 $\pm$ 0.030 & 0.29 $\pm$ 0.017 & 0.28 $\pm$ 0.032 \\ + & DDPM - Prob. Avg. & 0.44 $\pm$ 0.048 & 0.32 $\pm$ 0.045 & 0.26 $\pm$ 0.025 & 0.24 $\pm$ 0.015 & 0.23 $\pm$ 0.008 & 0.22 $\pm$ 0.012 \\ + & SP-P - Log Prob. Avg. & 0.33 $\pm$ 0.037 & 0.25 $\pm$ 0.018 & 0.25 $\pm$ 0.013 & 0.23 $\pm$ 0.024 & 0.21 $\pm$ 0.018 & 0.20 $\pm$ 0.018 \\ + & SP-P - Prob. Avg. & 0.33 $\pm$ 0.037 & 0.25 $\pm$ 0.018 & 0.20 $\pm$ 0.014 & 0.19 $\pm$ 0.016 & 0.17 $\pm$ 0.013 & 0.16 $\pm$ 0.011 \\ +\cline{1-8} +\multirow[c]{4}{*}{Random Forest} & DDPM - Log Prob. Avg. & 0.22 $\pm$ 0.014 & 0.21 $\pm$ 0.009 & 0.21 $\pm$ 0.009 & 0.21 $\pm$ 0.010 & 0.21 $\pm$ 0.010 & 0.21 $\pm$ 0.010 \\ + & DDPM - Prob. Avg. & 0.22 $\pm$ 0.014 & 0.21 $\pm$ 0.009 & 0.21 $\pm$ 0.009 & 0.21 $\pm$ 0.010 & 0.21 $\pm$ 0.010 & 0.21 $\pm$ 0.009 \\ + & SP-P - Log Prob. Avg. & 0.19 $\pm$ 0.006 & 0.18 $\pm$ 0.011 & 0.17 $\pm$ 0.009 & 0.17 $\pm$ 0.008 & 0.17 $\pm$ 0.007 & 0.17 $\pm$ 0.008 \\ + & SP-P - Prob. Avg. & 0.19 $\pm$ 0.006 & 0.18 $\pm$ 0.011 & 0.17 $\pm$ 0.009 & 0.17 $\pm$ 0.008 & 0.17 $\pm$ 0.007 & 0.17 $\pm$ 0.007 \\ +\cline{1-8} +\multirow[c]{4}{*}{Gradient Boosting} & DDPM - Log Prob. Avg. & 0.23 $\pm$ 0.021 & 0.22 $\pm$ 0.012 & 0.22 $\pm$ 0.009 & 0.22 $\pm$ 0.009 & 0.22 $\pm$ 0.008 & 0.21 $\pm$ 0.012 \\ + & DDPM - Prob. Avg. & 0.23 $\pm$ 0.021 & 0.22 $\pm$ 0.013 & 0.22 $\pm$ 0.008 & 0.22 $\pm$ 0.008 & 0.21 $\pm$ 0.007 & 0.21 $\pm$ 0.010 \\ + & SP-P - Log Prob. Avg. & 0.20 $\pm$ 0.018 & 0.18 $\pm$ 0.017 & 0.17 $\pm$ 0.012 & 0.17 $\pm$ 0.014 & 0.17 $\pm$ 0.011 & 0.16 $\pm$ 0.012 \\ + & SP-P - Prob. Avg. 
& 0.20 $\pm$ 0.018 & 0.18 $\pm$ 0.017 & 0.17 $\pm$ 0.012 & 0.17 $\pm$ 0.012 & 0.17 $\pm$ 0.010 & 0.16 $\pm$ 0.010 \\ +\cline{1-8} +\multirow[c]{4}{*}{MLP} & DDPM - Log Prob. Avg. & 0.33 $\pm$ 0.024 & 0.30 $\pm$ 0.016 & 0.30 $\pm$ 0.020 & 0.29 $\pm$ 0.024 & 0.29 $\pm$ 0.022 & 0.28 $\pm$ 0.015 \\ + & DDPM - Prob. Avg. & 0.33 $\pm$ 0.024 & 0.28 $\pm$ 0.018 & 0.26 $\pm$ 0.013 & 0.25 $\pm$ 0.020 & 0.24 $\pm$ 0.019 & 0.24 $\pm$ 0.014 \\ + & SP-P - Log Prob. Avg. & 0.27 $\pm$ 0.037 & 0.24 $\pm$ 0.030 & 0.21 $\pm$ 0.019 & 0.19 $\pm$ 0.016 & 0.19 $\pm$ 0.018 & 0.18 $\pm$ 0.016 \\ + & SP-P - Prob. Avg. & 0.27 $\pm$ 0.037 & 0.21 $\pm$ 0.021 & 0.18 $\pm$ 0.009 & 0.17 $\pm$ 0.012 & 0.16 $\pm$ 0.013 & 0.16 $\pm$ 0.013 \\ +\cline{1-8} +\multirow[c]{4}{*}{SVM} & DDPM - Log Prob. Avg. & 0.21 $\pm$ 0.010 & 0.21 $\pm$ 0.009 & 0.21 $\pm$ 0.010 & 0.21 $\pm$ 0.010 & 0.21 $\pm$ 0.010 & 0.21 $\pm$ 0.010 \\ + & DDPM - Prob. Avg. & 0.21 $\pm$ 0.010 & 0.21 $\pm$ 0.009 & 0.21 $\pm$ 0.010 & 0.21 $\pm$ 0.010 & 0.21 $\pm$ 0.010 & 0.21 $\pm$ 0.010 \\ + & SP-P - Log Prob. Avg. & 0.20 $\pm$ 0.008 & 0.18 $\pm$ 0.013 & 0.17 $\pm$ 0.013 & 0.17 $\pm$ 0.011 & 0.17 $\pm$ 0.008 & 0.17 $\pm$ 0.009 \\ + & SP-P - Prob. Avg. & 0.20 $\pm$ 0.008 & 0.18 $\pm$ 0.014 & 0.17 $\pm$ 0.012 & 0.17 $\pm$ 0.010 & 0.17 $\pm$ 0.008 & 0.17 $\pm$ 0.008 \\ +\cline{1-8} +\bottomrule +\end{tabular} + \end{table} + +\begin{table} + \caption{ + Table of results from predicting MSE on the ACS 2018 dataset. + The numbers are the mean measured and predicted MSEs $\pm$ standard + deviations from 3 repeats. The nans for linear regression are due to + excluding some repeats that had an extremely large MSE ($\geq 10^6$). + } + \label{table:mse-prediction-acs-2018} + \tiny + \centering + \begin{tabular}{lllllllll} +\toprule + & & m & 1 & 2 & 4 & 8 & 16 & 32 \\ +Downstream & Generator & Predicted / Measured & & & & & & \\ +\midrule +\multirow[c]{4}{*}{Linear Regression} & \multirow[c]{2}{*}{DDPM} & Predicted & 0.87 $\pm$ 0.008 & 0.87 $\pm$ 0.007 & 0.87 $\pm$ 0.008 & 0.88 $\pm$ 0.006 & 0.88 $\pm$ 0.007 & 0.88 $\pm$ 0.008 \\ + & & Measured & 0.87 $\pm$ 0.008 & 0.87 $\pm$ 0.007 & 0.87 $\pm$ 0.005 & 0.87 $\pm$ 0.001 & 0.87 $\pm$ 0.003 & 0.87 $\pm$ 0.003 \\ +\cline{2-9} + & \multirow[c]{2}{*}{SP-P} & Predicted & 0.78 $\pm$ 0.007 & 0.78 $\pm$ 0.006 & 0.78 $\pm$ 0.006 & 0.78 $\pm$ nan & nan & nan \\ + & & Measured & 0.78 $\pm$ 0.007 & 0.78 $\pm$ 0.006 & 0.78 $\pm$ 0.007 & 0.78 $\pm$ nan & nan & nan \\ +\cline{1-9} \cline{2-9} +\multirow[c]{4}{*}{Ridge Regression} & \multirow[c]{2}{*}{DDPM} & Predicted & 0.87 $\pm$ 0.008 & 0.87 $\pm$ 0.007 & 0.87 $\pm$ 0.008 & 0.87 $\pm$ 0.009 & 0.87 $\pm$ 0.010 & 0.87 $\pm$ 0.010 \\ + & & Measured & 0.87 $\pm$ 0.008 & 0.87 $\pm$ 0.007 & 0.87 $\pm$ 0.005 & 0.87 $\pm$ 0.003 & 0.87 $\pm$ 0.004 & 0.87 $\pm$ 0.003 \\ +\cline{2-9} + & \multirow[c]{2}{*}{SP-P} & Predicted & 0.78 $\pm$ 0.007 & 0.78 $\pm$ 0.006 & 0.78 $\pm$ 0.006 & 0.77 $\pm$ 0.006 & 0.77 $\pm$ 0.006 & 0.77 $\pm$ 0.006 \\ + & & Measured & 0.78 $\pm$ 0.007 & 0.78 $\pm$ 0.006 & 0.78 $\pm$ 0.007 & 0.78 $\pm$ 0.007 & 0.77 $\pm$ 0.007 & 0.77 $\pm$ 0.006 \\ +\cline{1-9} \cline{2-9} +\multirow[c]{4}{*}{1-NN} & \multirow[c]{2}{*}{DDPM} & Predicted & 1.55 $\pm$ 0.022 & 1.26 $\pm$ 0.026 & 1.11 $\pm$ 0.027 & 1.04 $\pm$ 0.028 & 1.01 $\pm$ 0.029 & 0.99 $\pm$ 0.029 \\ + & & Measured & 1.55 $\pm$ 0.022 & 1.26 $\pm$ 0.026 & 1.10 $\pm$ 0.019 & 1.03 $\pm$ 0.014 & 1.00 $\pm$ 0.013 & 0.98 $\pm$ 0.011 \\ +\cline{2-9} + & \multirow[c]{2}{*}{SP-P} & Predicted & 1.38 $\pm$ 0.015 & 1.04 $\pm$ 0.010 & 
0.87 $\pm$ 0.009 & 0.79 $\pm$ 0.010 & 0.75 $\pm$ 0.010 & 0.72 $\pm$ 0.010 \\ + & & Measured & 1.38 $\pm$ 0.015 & 1.04 $\pm$ 0.010 & 0.88 $\pm$ 0.001 & 0.79 $\pm$ 0.004 & 0.75 $\pm$ 0.006 & 0.73 $\pm$ 0.007 \\ +\cline{1-9} \cline{2-9} +\multirow[c]{4}{*}{5-NN} & \multirow[c]{2}{*}{DDPM} & Predicted & 0.98 $\pm$ 0.006 & 0.93 $\pm$ 0.009 & 0.91 $\pm$ 0.010 & 0.90 $\pm$ 0.011 & 0.89 $\pm$ 0.011 & 0.89 $\pm$ 0.012 \\ + & & Measured & 0.98 $\pm$ 0.006 & 0.93 $\pm$ 0.009 & 0.90 $\pm$ 0.008 & 0.89 $\pm$ 0.007 & 0.89 $\pm$ 0.004 & 0.88 $\pm$ 0.004 \\ +\cline{2-9} + & \multirow[c]{2}{*}{SP-P} & Predicted & 0.93 $\pm$ 0.002 & 0.82 $\pm$ 0.005 & 0.77 $\pm$ 0.008 & 0.74 $\pm$ 0.009 & 0.73 $\pm$ 0.009 & 0.72 $\pm$ 0.010 \\ + & & Measured & 0.93 $\pm$ 0.002 & 0.82 $\pm$ 0.005 & 0.78 $\pm$ 0.004 & 0.75 $\pm$ 0.008 & 0.74 $\pm$ 0.008 & 0.73 $\pm$ 0.009 \\ +\cline{1-9} \cline{2-9} +\multirow[c]{4}{*}{Decision Tree} & \multirow[c]{2}{*}{DDPM} & Predicted & 1.41 $\pm$ 0.008 & 1.09 $\pm$ 0.005 & 0.94 $\pm$ 0.008 & 0.86 $\pm$ 0.009 & 0.82 $\pm$ 0.010 & 0.80 $\pm$ 0.011 \\ + & & Measured & 1.41 $\pm$ 0.008 & 1.09 $\pm$ 0.005 & 0.95 $\pm$ 0.006 & 0.87 $\pm$ 0.004 & 0.84 $\pm$ 0.006 & 0.82 $\pm$ 0.011 \\ +\cline{2-9} + & \multirow[c]{2}{*}{SP-P} & Predicted & 1.25 $\pm$ 0.035 & 0.96 $\pm$ 0.023 & 0.82 $\pm$ 0.018 & 0.75 $\pm$ 0.017 & 0.72 $\pm$ 0.016 & 0.70 $\pm$ 0.016 \\ + & & Measured & 1.25 $\pm$ 0.035 & 0.96 $\pm$ 0.023 & 0.82 $\pm$ 0.011 & 0.74 $\pm$ 0.005 & 0.71 $\pm$ 0.003 & 0.69 $\pm$ 0.004 \\ +\cline{1-9} \cline{2-9} +\multirow[c]{4}{*}{Random Forest} & \multirow[c]{2}{*}{DDPM} & Predicted & 0.88 $\pm$ 0.010 & 0.82 $\pm$ 0.013 & 0.80 $\pm$ 0.015 & 0.78 $\pm$ 0.016 & 0.77 $\pm$ 0.017 & 0.77 $\pm$ 0.017 \\ + & & Measured & 0.88 $\pm$ 0.010 & 0.82 $\pm$ 0.013 & 0.80 $\pm$ 0.009 & 0.78 $\pm$ 0.009 & 0.78 $\pm$ 0.009 & 0.77 $\pm$ 0.009 \\ +\cline{2-9} + & \multirow[c]{2}{*}{SP-P} & Predicted & 0.84 $\pm$ 0.010 & 0.76 $\pm$ 0.010 & 0.72 $\pm$ 0.010 & 0.70 $\pm$ 0.010 & 0.69 $\pm$ 0.011 & 0.68 $\pm$ 0.011 \\ + & & Measured & 0.84 $\pm$ 0.010 & 0.76 $\pm$ 0.010 & 0.72 $\pm$ 0.006 & 0.69 $\pm$ 0.005 & 0.68 $\pm$ 0.005 & 0.68 $\pm$ 0.006 \\ +\cline{1-9} \cline{2-9} +\multirow[c]{4}{*}{Gradient Boosting} & \multirow[c]{2}{*}{DDPM} & Predicted & 0.74 $\pm$ 0.008 & 0.74 $\pm$ 0.008 & 0.74 $\pm$ 0.008 & 0.74 $\pm$ 0.008 & 0.74 $\pm$ 0.008 & 0.74 $\pm$ 0.008 \\ + & & Measured & 0.74 $\pm$ 0.008 & 0.74 $\pm$ 0.008 & 0.74 $\pm$ 0.006 & 0.74 $\pm$ 0.005 & 0.74 $\pm$ 0.004 & 0.74 $\pm$ 0.004 \\ +\cline{2-9} + & \multirow[c]{2}{*}{SP-P} & Predicted & 0.68 $\pm$ 0.005 & 0.68 $\pm$ 0.005 & 0.68 $\pm$ 0.005 & 0.67 $\pm$ 0.005 & 0.67 $\pm$ 0.005 & 0.67 $\pm$ 0.005 \\ + & & Measured & 0.68 $\pm$ 0.005 & 0.68 $\pm$ 0.005 & 0.67 $\pm$ 0.005 & 0.67 $\pm$ 0.005 & 0.67 $\pm$ 0.006 & 0.67 $\pm$ 0.006 \\ +\cline{1-9} \cline{2-9} +\multirow[c]{4}{*}{MLP} & \multirow[c]{2}{*}{DDPM} & Predicted & 0.82 $\pm$ 0.019 & 0.77 $\pm$ 0.018 & 0.75 $\pm$ 0.018 & 0.74 $\pm$ 0.019 & 0.74 $\pm$ 0.019 & 0.73 $\pm$ 0.019 \\ + & & Measured & 0.82 $\pm$ 0.019 & 0.77 $\pm$ 0.018 & 0.76 $\pm$ 0.017 & 0.75 $\pm$ 0.007 & 0.74 $\pm$ 0.005 & 0.74 $\pm$ 0.004 \\ +\cline{2-9} + & \multirow[c]{2}{*}{SP-P} & Predicted & 0.76 $\pm$ 0.006 & 0.71 $\pm$ 0.003 & 0.68 $\pm$ 0.003 & 0.67 $\pm$ 0.003 & 0.66 $\pm$ 0.004 & 0.66 $\pm$ 0.004 \\ + & & Measured & 0.76 $\pm$ 0.006 & 0.71 $\pm$ 0.003 & 0.68 $\pm$ 0.008 & 0.67 $\pm$ 0.007 & 0.67 $\pm$ 0.007 & 0.66 $\pm$ 0.006 \\ +\cline{1-9} \cline{2-9} +\multirow[c]{4}{*}{SVM} & \multirow[c]{2}{*}{DDPM} & Predicted & 0.79 $\pm$ 
0.004 & 0.78 $\pm$ 0.002 & 0.77 $\pm$ 0.004 & 0.77 $\pm$ 0.005 & 0.77 $\pm$ 0.005 & 0.77 $\pm$ 0.006 \\
+ & & Measured & 0.79 $\pm$ 0.004 & 0.78 $\pm$ 0.002 & 0.77 $\pm$ 0.002 & 0.78 $\pm$ 0.002 & 0.78 $\pm$ 0.003 & 0.78 $\pm$ 0.002 \\
+\cline{2-9}
+ & \multirow[c]{2}{*}{SP-P} & Predicted & 0.71 $\pm$ 0.005 & 0.69 $\pm$ 0.003 & 0.69 $\pm$ 0.002 & 0.68 $\pm$ 0.001 & 0.68 $\pm$ 0.001 & 0.68 $\pm$ 0.001 \\
+ & & Measured & 0.71 $\pm$ 0.005 & 0.69 $\pm$ 0.003 & 0.69 $\pm$ 0.002 & 0.69 $\pm$ 0.004 & 0.69 $\pm$ 0.004 & 0.68 $\pm$ 0.004 \\
+\cline{1-9} \cline{2-9}
+\bottomrule
+\end{tabular}
+    \end{table}
+
+\FloatBarrier
+
+\subsection{Estimating Model and Synthetic Data Variances}\label{app:variance-estimation-experiment}
+In this section, we estimate the MV and SDV terms from the decomposition in
+Theorem~\ref{thm:mse-synthetic-data-decomposition}. We first generate
+32 synthetic datasets that are 5 times larger than the real dataset, and split
+each synthetic dataset into 5 equally-sized subsets. This is equivalent to
+sampling 32 $\theta_i$ values and, for each $\theta_i$, generating 5 synthetic
+datasets i.i.d. We then train the downstream predictor on each of these synthetic datasets
+and store the predictions for all test points.
+
+To estimate MV, we compute the sample variance over the 5 synthetic datasets
+generated from the same $\theta_i$, and then compute the mean over the 32 different
+$\theta_i$ values. To estimate SDV, we compute the sample mean over the 5 synthetic
+datasets from the same $\theta_i$, and compute the sample variance over the
+32 different $\theta_i$ values.
+
+The result is an estimate of MV and SDV for each test point. We plot the mean
+over the test points. The whole experiment is repeated 3 times, with different
+train-test splits. The datasets, train-test splits, and downstream predictors
+are the same as in the other experiments, described in
+Appendix~\ref{sec:experiment-details}.
+
+The results are in Figure~\ref{fig:variance-estimation}. MV depends mostly
+on the downstream predictor, while SDV also depends on the synthetic data
+generator. We also confirm that decision trees and 1-NN have much higher variance
+than the other models, while linear, ridge and logistic regression have
+very low variance.
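+As a minimal sketch of this estimator, assuming the stored predictions for one downstream
+predictor are arranged in an array of shape (number of $\theta_i$ values, synthetic datasets
+per $\theta_i$, test points), the MV and SDV estimates can be computed as follows; the
+function name and array layout are illustrative, not part of our released code.
+\begin{verbatim}
+import numpy as np
+
+def estimate_mv_sdv(preds):
+    # preds has shape (n_theta, n_per_theta, n_test), here (32, 5, n_test):
+    # predictions at each test point from a model trained on the j-th
+    # synthetic dataset generated from the i-th generator draw theta_i.
+    # MV: sample variance over datasets sharing theta_i, averaged over theta_i.
+    mv = preds.var(axis=1, ddof=1).mean(axis=0)
+    # SDV: sample variance over theta_i of the per-theta_i mean prediction.
+    sdv = preds.mean(axis=1).var(axis=0, ddof=1)
+    # Average over test points for plotting.
+    return mv.mean(), sdv.mean()
+\end{verbatim}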
+ +\begin{figure*} + \begin{subfigure}{0.5\textwidth} + \centering + \includegraphics[width=\textwidth]{figures/variance-estimation/abalone.pdf} + \vspace{-6mm} + \caption{Abalone} + \end{subfigure} + \begin{subfigure}{0.5\textwidth} + \centering + \includegraphics[width=\textwidth]{figures/variance-estimation/ACS2018.pdf} + \vspace{-6mm} + \caption{ACS 2018} + \end{subfigure} + + \vspace{2mm} \begin{subfigure}{0.5\textwidth} + \centering + \includegraphics[width=\textwidth]{figures/variance-estimation/adult.pdf} + \vspace{-6mm} + \caption{Adult} + \end{subfigure} + \begin{subfigure}{0.5\textwidth} + \centering + \includegraphics[width=\textwidth]{figures/variance-estimation/breast-cancer.pdf} + \vspace{-6mm} + \caption{Breast Cancer} + \end{subfigure} + + \vspace{2mm} + \begin{subfigure}{0.5\textwidth} + \centering + \includegraphics[width=\textwidth]{figures/variance-estimation/california-housing.pdf} + \vspace{-6mm} + \caption{California Housing} + \end{subfigure} + \begin{subfigure}{0.5\textwidth} + \centering + \includegraphics[width=\textwidth]{figures/variance-estimation/german-credit.pdf} + \vspace{-6mm} + \caption{German Credit} + \end{subfigure} + + \vspace{2mm} + \begin{subfigure}{1.0\textwidth} + \centering + \includegraphics[width=0.5\textwidth]{figures/variance-estimation/insurance.pdf} + \vspace{-2mm} + \caption{Insurance} + \end{subfigure} + \caption{ + Estimating the MV and SDV terms from the decomposition. Decision trees + have high variances on all datasets, while linear, ridge and logistic regression + have low variances. MV depends mostly on the predictor, while SDV depends + on both the predictor and synthetic data generation algorithm. The + points are the averages of estimated MV and SDV, + averaged over the test data, from 3 repeats with different train-test splits. + We excluded some test data points that had extremely large variance estimates + ($\geq 10^6$) for linear regression on the ACS 2018 dataset. + The predictors are decision tree (DT), nearest neighbours with 1 or 5 neighbours (1-NN and 5-NN), + random forest (RF), a multilayer perceptron (MLP), + gradient boosted trees (GB), a support vector machine (SVM), + linear regression (LR), ridge regression (RR) and logistic regression (LogR). + The synthetic data generators are DDPM and synthpop (SP-P). + } + \label{fig:variance-estimation} +\end{figure*} + + +\end{document}