<s> import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from .bin.aion_pipeline import aion_train_model
<s> import argparse
import sys
import os
import subprocess
INSTALL = 'install'
LINUXINSTALL = 'linuxinstall'
FE_MIGRATE = 'migrateappfe'
LAUNCH_KAFKA = 'launchkafkaconsumer'
RUN_LOCAL_MLAC_PIPELINE = 'runpipelinelocal'
BUILD_MLAC_CONTAINER = 'buildmlaccontainerlocal'
CONVERT_MODEL = 'convertmodel'
START_MLFLOW = 'mlflow'
COMMON_SERVICE = 'service'
TRAINING = 'training'
TRAINING_AWS = 'trainingonaws'
TRAINING_DISTRIBUTED = 'distributedtraining'
START_APPF = 'appfe'
ONLINE_TRAINING = 'onlinetraining'
TEXT_SUMMARIZATION = 'textsummarization'
GENERATE_MLAC = 'generatemlac'
AWS_TRAINING = 'awstraining'
LLAMA_7B_TUNING = 'llama7btuning'
LLM_PROMPT = 'llmprompt'
LLM_TUNING = 'llmtuning'
LLM_PUBLISH = 'llmpublish'
LLM_BENCHMARKING = 'llmbenchmarking'
TELEMETRY_PUSH = 'pushtelemetry'
def aion_aws_training(confFile):
from hyperscalers.aion_aws_training import awsTraining
status = awsTraining(confFile)
print(status)
def aion_training(confFile):
from bin.aion_pipeline import aion_train_model
status = aion_train_model(confFile)
print(status)
def aion_awstraining(config_file):
from hyperscalers import aws_instance
print(config_file)
aws_instance.training(config_file)
def aion_generatemlac(ConfFile):
from bin.aion_mlac import generate_mlac_code
status = generate_mlac_code(ConfFile)
print(status)
def aion_textsummarization(confFile):
from bin.aion_text_summarizer import aion_textsummary
status = aion_textsummary(confFile)
print(status)
def aion_oltraining(confFile):
from bin.aion_online_pipeline import aion_ot_train_model
status = aion_ot_train_model(confFile)
print(status)
def do_telemetry_sync():
from appbe.telemetry import SyncTelemetry
SyncTelemetry()
def aion_llm_publish(cloudconfig,instanceid,hypervisor,model,usecaseid,region,image):
from llm.llm_inference import LLM_publish
LLM_publish(cloudconfig,instanceid,hypervisor,model,usecaseid,region,image)
def aion_migratefe(operation):
import os
import sys
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'appfe.ux.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
argi=[]
argi.append(os.path.abspath(__file__))
argi.append(operation)
execute_from_command_line(argi)
def aion_appfe(url,port):
#manage_location = os.path.join(os.path.dirname(os.path.abspath(__file__)),'manage.py')
#subprocess.check_call([sys.executable,manage_location, "runserver","%s:%s"%(url,port)])
import os
import sys
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'appfe.ux.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
argi=[]
argi.append(os.path.abspath(__file__))
argi.append('runaion')
argi.append("%s:%s"%(url,port))
execute_from_command_line(argi)
def aion_linux_install(version):
from install import linux_dependencies
linux_dependencies.process(version)
def aion_install(version):
from install import dependencies
dependencies.process(version)
def aion_service(ip,port,username,password):
from bin.aion_service import start_server
start_server(ip,port,username,password)
def aion_distributedLearning(confFile):
from distributed_learning import learning
learning.training(confFile)
def aion_launchkafkaconsumer():
from mlops import kafka_consumer
kafka_consumer.launch_kafka_consumer()
def aion_start_mlflow():
from appbe.dataPath import DEPLOY_LOCATION
import platform
import shutil
from os.path import expanduser
mlflowpath = os.path.normpath(os.path.join(os.path.dirname(__file__),'..','..','..','Scripts','mlflow.exe'))
print(mlflowpath)
home = expanduser("~")
if platform.system() == 'Windows':
DEPLOY_LOCATION = os.path.join(DEPLOY_LOCATION,'mlruns')
subprocess.Popen([sys.executable, mlflowpath, "ui", "--backend-store-uri", "file:///" + DEPLOY_LOCATION])
else:
DEPLOY_LOCATION = os.path.join(DEPLOY_LOCATION,'mlruns')
subprocess.check_call(['mlflow',"ui","-h","0.0.0.0","--backend-store-uri","file:///"+DEPLOY_LOCATION])
def aion_model_conversion(config_file):
from conversions import model_convertions
model_convertions.convert(config_file)
def aion_model_buildMLaCContainer(config):
from mlops import build_container
build_container.local_docker_build(config)
def aion_model_runpipelinelocal(config):
from mlops import local_pipeline
local_pipeline.run_pipeline(config)
def aion_llm_tuning(config):
from llm.llm_tuning import run
run(config)
def aion_llm_prompt(cloudconfig,instanceid,prompt):
from llm.aws_instance_api import LLM_predict
LLM_predict(cloudconfig,instanceid,prompt)
def llm_bench_marking(hypervisor,instanceid,model,usecaseid,eval):
print(eval)
from llm.bench_marking import bench_mark
bench_mark(hypervisor,instanceid,model,usecaseid,eval)
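# Example invocations (script name and paths are placeholders; flags per the parser below):
#   python aion.py -m training -c /path/to/aion_config.json
#   python aion.py -m appfe -ip 127.0.0.1 -p 8008
#   python aion.py -m llmtuning -c /path/to/llm_config.json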
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--configPath', help='Config File Path')
parser.add_argument('-i', '--instanceid', help='instanceid')
parser.add_argument('-hv', '--hypervisor', help='hypervisor')
parser.add_argument('-md', '--model', help='model')
parser.add_argument('-uc', '--usecase', help='usecase')
parser.add_argument('-cc', '--cloudConfigPath', help='Cloud Config File Path')
parser.add_argument('-m', '--module', help='MODULE=TRAINING, APPFE, ONLINETRAINING, DISTRIBUTEDTRAINING')
parser.add_argument('-ip', '--ipaddress', help='URL applicable only for APPFE method ')
parser.add_argument('-p', '--port', help='APP Front End Port applicable only for APPFE method ')
parser.add_argument('-ac', '--appfecommand', help='APP Front End Command ')
parser.add_argument('-un','--username', help="USERNAME")
parser.add_argument('-passw','--password', help="PASSWORD")
parser.add_argument('-j', '--jsoninput', help='JSON Input')
parser.add_argument('-v', '--version', help='Installer Version')
parser.add_argument('-pf', '--prompt', help='Prompt File')
parser.add_argument('-r', '--region', help='REGION NAME')
parser.add_argument('-im', '--image', help='IMAGE NAME')
parser.add_argument('-e', '--eval', help='evaluation for code or doc', default='doc')
args = parser.parse_args()
if args.module.lower() == TRAINING:
aion_training(args.configPath)
elif args.module.lower() == TRAINING_AWS:
aion_awstraining(args.configPath)
elif args.module.lower() == TRAINING_DISTRIBUTED:
aion_distributedLearning(args.configPath)
elif args.module.lower() == START_APPF:
aion_appfe(args.ipaddress,args.port)
elif args.module.lower() == ONLINE_TRAINING:
aion_oltraining(args.configPath)
elif args.module.lower() == TEXT_SUMMARIZATION:
aion_textsummarization(args.configPath)
elif args.module.lower() == GENERATE_MLAC:
aion_generatemlac(args.configPath)
elif args.module.lower() == COMMON_SERVICE:
aion_service(args.ipaddress,args.port,args.username,args.password)
elif args.module.lower() == START_MLFLOW:
aion_start_mlflow()
elif args.module.lower() == CONVERT_MODEL:
aion_model_conversion(args.configPath)
elif args.module.lower() == BUILD_MLAC_CONTAINER:
aion_model_buildMLaCContainer(args.jsoninput)
elif args.module.lower() == RUN_LOCAL_MLAC_PIPELINE:
aion_model_runpipelinelocal(args.jsoninput)
elif args.module.lower() == LAUNCH_KAFKA:
aion_launchkafkaconsumer()
elif args.module.lower() == INSTALL:
aion_install(args.version)
elif args.module.lower() == LINUXINSTALL:
aion_linux_install(args.version)
elif args.module.lower() == FE_MIGRATE:
aion_migratefe('makemigrations')
aion_migratefe('migrate')
elif args.module.lower() == AWS_TRAINING:
aion_aws_training(args.configPath)
elif args.module.lower() == LLAMA_7B_TUNING:
aion_llm_tuning(args.configPath)
elif args.module.lower() == LLM_TUNING:
aion_llm_tuning(args.configPath)
elif args.module.lower() == LLM_PROMPT:
aion_llm_prompt(args.cloudConfigPath,args.instanceid,args.prompt)
elif args.module.lower() == LLM_PUBLISH:
aion_llm_publish(args.cloudConfigPath,args.instanceid,args.hypervisor,args.model,args.usecase,args.region,args.image)
elif args.module.lower() == LLM_BENCHMARKING:
llm_bench_marking(args.hypervisor,args.instanceid,args.model,args.usecase, args.eval)
elif args.module.lower() == TELEMETRY_PUSH:
do_telemetry_sync()
<s> import numpy as np
from scipy.stats import norm
from sklearn.metrics import mean_squared_error, r2_score
from ..utils.misc import fitted_ucc_w_nullref
def picp(y_true, y_lower, y_upper):
"""
Prediction Interval Coverage Probability (PICP). Computes the fraction of samples for which the ground truth lies
within the predicted interval. Measures the prediction interval calibration for regression.
Args:
y_true: Ground truth
y_lower: predicted lower bound
y_upper: predicted upper bound
Returns:
float: the fraction of samples for which the ground truth lies within the predicted interval.
"""
satisfies_upper_bound = y_true <= y_upper
satisfies_lower_bound = y_true >= y_lower
return np.mean(satisfies_upper_bound * satisfies_lower_bound)
def mpiw(y_lower, y_upper):
"""
Mean Prediction Interval Width (MPIW). Computes the average width of the prediction intervals. Measures the
sharpness of the intervals.
Args:
y_lower: predicted lower bound
y_upper: predicted upper bound
Returns:
float: the average width of the prediction interval across samples.
"""
return np.mean(np.abs(y_lower - y_upper))
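# Minimal sketch (hypothetical data) illustrating picp() and mpiw() above: two of the
# three intervals cover the truth, and each interval is 1.0 wide.
def _demo_interval_metrics():
    y_true = np.array([1.0, 2.0, 3.0])
    y_lower = np.array([0.5, 2.5, 2.5])   # the second interval misses y_true[1]
    y_upper = np.array([1.5, 3.5, 3.5])
    assert np.isclose(picp(y_true, y_lower, y_upper), 2.0 / 3.0)
    assert np.isclose(mpiw(y_lower, y_upper), 1.0)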
def auucc_gain(y_true, y_mean, y_lower, y_upper):
""" Computes the Area Under the Uncertainty Characteristics Curve (AUUCC) gain wrt to a null reference
with constant band.
Args:
y_true: Ground truth
y_mean: predicted mean
y_lower: predicted lower bound
y_upper: predicted upper bound
Returns:
float: AUUCC gain
"""
u = fitted_ucc_w_nullref(y_true, y_mean, y_lower, y_upper)
auucc = u.get_AUUCC()
assert(isinstance(auucc, list) and len(auucc) == 2), "Failed to calculate auucc gain"
assert (not np.isclose(auucc[1], 0.)), "Failed to calculate auucc gain"
auucc_gain = (auucc[1]-auucc[0])/auucc[0]
return auucc_gain
def negative_log_likelihood_Gaussian(y_true, y_mean, y_lower, y_upper):
""" Computes Gaussian negative_log_likelihood assuming symmetric band around the mean.
Args:
y_true: Ground truth
y_mean: predicted mean
y_lower: predicted lower bound
y_upper: predicted upper bound
Returns:
float: nll
"""
y_std = (y_upper - y_lower) / 4.0
nll = np.mean(-norm.logpdf(y_true.squeeze(), loc=y_mean.squeeze(), scale=y_std.squeeze()))
return nll
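# Sketch (hypothetical data): the band is assumed symmetric at mean +/- 2*std, so
# y_std = (y_upper - y_lower) / 4 recovers the predictive std used in the NLL above.
def _demo_gaussian_nll():
    y_mean = np.array([0.0, 1.0])
    y_std = np.array([1.0, 2.0])
    y_true = np.array([0.5, 0.0])
    nll = negative_log_likelihood_Gaussian(y_true, y_mean, y_mean - 2 * y_std, y_mean + 2 * y_std)
    assert np.isclose(nll, np.mean(-norm.logpdf(y_true, loc=y_mean, scale=y_std)))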
def compute_regression_metrics(y_true, y_mean, y_lower, y_upper, option="all", nll_fn=None):
"""
Computes the metrics specified in the option, which can be a string or a list of strings. The default option `all` computes
the ["rmse", "nll", "auucc_gain", "picp", "mpiw", "r2"] metrics.
Args:
y_true: Ground truth
y_mean: predicted mean
y_lower: predicted lower bound
y_upper: predicted upper bound
option: string or list of strings containing the names of the metrics to be computed.
nll_fn: function that evaluates NLL; if None, computes Gaussian NLL using y_mean, y_lower and y_upper.
Returns:
dict: dictionary containing the computed metrics.
"""
assert y_true.shape == y_mean.shape, "y_true shape: {}, y_mean shape: {}".format(y_true.shape, y_mean.shape)
assert y_true.shape == y_lower.shape, "y_true shape: {}, y_lower shape: {}".format(y_true.shape, y_lower.shape)
assert y_true.shape == y_upper.shape, "y_true shape: {}, y_upper shape: {}".format(y_true.shape, y_upper.shape)
results = {}
if isinstance(option, list):
option_list = option
elif option == "all":
option_list = ["rmse", "nll", "auucc_gain", "picp", "mpiw", "r2"]
else:
option_list = [option]
if "rmse" in option_list:
results["rmse"] = mean_squared_error(y_true, y_mean, squared=False)
if "nll" in option_list:
if nll_fn is None:
nll = negative_log_likelihood_Gaussian(y_true, y_mean, y_lower, y_upper)
results["nll"] = nll
else:
results["nll"] = np.mean(nll_fn(y_true))
if "auucc_gain" in option_list:
gain = auucc_gain(y_true, y_mean, y_lower, y_upper)
results["auucc_gain"] = gain
if "picp" in option_list:
results["picp"] = picp(y_true, y_lower, y_upper)
if "mpiw" in option_list:
results["mpiw"] = mpiw(y_lower, y_upper)
if "r2" in option_list:
results["r2"] = r2_score(y_true, y_mean)
return results
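# Usage sketch (synthetic data): compute all supported regression metrics at once.
# Note that the "auucc_gain" entry relies on fitted_ucc_w_nullref imported above.
def _demo_regression_metrics():
    rng = np.random.default_rng(0)
    y_true = rng.normal(size=100)
    y_mean = y_true + rng.normal(scale=0.1, size=100)
    metrics = compute_regression_metrics(y_true, y_mean, y_mean - 0.5, y_mean + 0.5)
    print(metrics)  # keys: rmse, nll, auucc_gain, picp, mpiw, r2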
def _check_not_tuple_of_2_elements(obj, obj_name='obj'):
"""Check object is not tuple or does not have 2 elements."""
if not isinstance(obj, tuple) or len(obj) != 2:
raise TypeError('%s must be a tuple of 2 elements.' % obj_name)
def plot_uncertainty_distribution(dist, show_quantile_dots=False, qd_sample=20, qd_bins=7,
ax=None, figsize=None, dpi=None,
title='Predicted Distribution', xlims=None, xlabel='Prediction', ylabel='Density', **kwargs):
"""
Plot the uncertainty distribution for a single distribution.
Args:
dist: scipy.stats._continuous_distns.
A scipy distribution object.
show_quantile_dots: boolean.
Whether to show quantile dots on top of the density plot.
qd_sample: int.
Number of dots for the quantile dot plot.
qd_bins: int.
Number of bins for the quantile dot plot.
ax: matplotlib.axes.Axes or None, optional (default=None).
Target axes instance. If None, new figure and axes will be created.
figsize: tuple of 2 elements or None, optional (default=None).
Figure size.
dpi : int or None, optional (default=None).
Resolution of the figure.
title : string or None, optional (default='Predicted Distribution')
Axes title.
If None, title is disabled.
xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.set_xlim()``.
xlabel : string or None, optional (default=Prediction)
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional (default=Density)
Y-axis title label.
If None, title is disabled.
Returns:
matplotlib.axes.Axes: ax : The plot with prediction distribution.
"""
import matplotlib.pyplot as plt
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
x = np.linspace(dist.ppf(0.01), dist.ppf(0.99), 100)
ax.plot(x, dist.pdf(x), **kwargs)
if show_quantile_dots:
from matplotlib.patches import Circle
from matplotlib.collections import PatchCollection
import matplotlib.ticker as ticker
data = dist.rvs(size=10000)
p_less_than_x = np.linspace(1 / qd_sample / 2, 1 - (1 / qd_sample / 2), qd_sample)
x_ = np.percentile(data, p_less_than_x * 100)  # inverse CDF (ppf)
# Create bins
hist = np.histogram(x_, bins=qd_bins)
bins, edges = hist
radius = (edges[1] - edges[0]) / 2
ax2 = ax.twinx()
patches = []
max_y = 0
for i in range(qd_bins):
x_bin = (edges[i + 1] + edges[i]) / 2
y_bins = [(i + 1) * (radius * 2) for i in range(bins[i])]
max_y = max(y_bins) if max(y_bins) > max_y else max_y
for _, y_bin in enumerate(y_bins):
circle = Circle((x_bin, y_bin), radius)
patches.append(circle)
p = PatchCollection(patches, alpha=0.4)
ax2.add_collection(p)
# Axis tweak
y_scale = (max_y + radius) / max(dist.pdf(x))
ticks_y = ticker.FuncFormatter(lambda y, pos: '{0:g}'.format(y / y_scale))
ax2.yaxis.set_major_formatter(ticks_y)
ax2.set_yticklabels([])
if xlims is not None:
ax2.set_xlim(left=xlims[0], right=xlims[1])
else:
ax2.set_xlim([min(x_) - radius, max(x) + radius])
ax2.set_ylim([0, max_y + radius])
ax2.set_aspect(1)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
return ax
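# Usage sketch: plot the density of a standard normal predictive distribution with a
# quantile dot plot overlaid (matplotlib must be available at call time).
def _demo_plot_distribution():
    return plot_uncertainty_distribution(norm(loc=0.0, scale=1.0),
                                         show_quantile_dots=True, qd_sample=20, qd_bins=7)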
def plot_picp_by_feature(x_test, y_test, y_test_pred_lower_total, y_test_pred_upper_total, num_bins=10,
ax=None, figsize=None, dpi=None, xlims=None, ylims=None, xscale="linear",
title=None, xlabel=None, ylabel=None):
"""
Plot how the prediction interval coverage (PICP) varies across the range of a feature.
Args:
x_test: One dimensional ndarray.
Feature column of the test dataset.
y_test: One dimensional ndarray.
Ground truth label of the test dataset.
y_test_pred_lower_total: One dimensional ndarray.
Lower bound of the total uncertainty range.
y_test_pred_upper_total: One dimensional ndarray.
Upper bound of the total uncertainty range.
num_bins: int.
Number of bins used to discretize x_test into equal-sample-sized bins.
ax: matplotlib.axes.Axes or None, optional (default=None). Target axes instance. If None, new figure and axes will be created.
figsize: tuple of 2 elements or None, optional (default=None). Figure size.
dpi : int or None, optional (default=None). Resolution of the figure.
xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.set_xlim()``.
ylims: tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.set_ylim()``.
xscale: Passed to ``ax.set_xscale()``.
title : string or None, optional
Axes title.
If None, title is disabled.
xlabel : string or None, optional
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional
Y-axis title label.
If None, title is disabled.
Returns:
matplotlib.axes.Axes: ax : The plot with PICP scores binned by a feature.
"""
from scipy.stats.mstats import mquantiles
import matplotlib.pyplot as plt
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
x_uniques_sorted = np.sort(np.unique(x_test))
num_unique = len(x_uniques_sorted)
sample_bin_ids = np.searchsorted(x_uniques_sorted, x_test)
if len(x_uniques_sorted) > 10: # bin the values
q_bins = mquantiles(x_test, np.histogram_bin_edges([], bins=num_bins-1, range=(0.0, 1.0))[1:])
q_sample_bin_ids = np.digitize(x_test, q_bins)
picps = np.array([picp(y_test[q_sample_bin_ids==bin], y_test_pred_lower_total[q_sample_bin_ids==bin],
y_test_pred_upper_total[q_sample_bin_ids==bin]) for bin in range(num_bins)])
unique_sample_bin_ids = np.digitize(x_uniques_sorted, q_bins)
picp_replicated = [len(x_uniques_sorted[unique_sample_bin_ids == bin]) * [picps[bin]] for bin in range(num_bins)]
picp_replicated = np.array([item for sublist in picp_replicated for item in sublist])
else:
picps = np.array([picp(y_test[sample_bin_ids == bin], y_test_pred_lower_total[sample_bin_ids == bin],
y_test_pred_upper_total[sample_bin_ids == bin]) for bin in range(num_unique)])
picp_replicated = picps
ax.plot(x_uniques_sorted, picp_replicated, label='PICP')
ax.axhline(0.95, linestyle='--', label='95%')
ax.set_ylabel('PICP')
ax.legend(loc='best')
if title is None:
title = 'Test data overall PICP: {:.2f} MPIW: {:.2f}'.format(
picp(y_test,
y_test_pred_lower_total,
y_test_pred_upper_total),
mpiw(y_test_pred_lower_total,
y_test_pred_upper_total))
if xlims is not None:
ax.set_xlim(left=xlims[0], right=xlims[1])
if ylims is not None:
ax.set_ylim(bottom=ylims[0], top=ylims[1])
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if xscale is not None:
ax.set_xscale(xscale)
return ax
def plot_uncertainty_by_feature(x_test, y_test_pred_mean, y_test_pred_lower_total, y_test_pred_upper_total,
y_test_pred_lower_epistemic=None, y_test_pred_upper_epistemic=None,
ax=None, figsize=None, dpi=None, xlims=None, xscale="linear",
title=None, xlabel=None, ylabel=None):
"""
Plot how prediction uncertainty varies across the entire range of a feature.
Args:
x_test: one dimensional ndarray.
Feature column of the test dataset.
y_test_pred_mean: One dimensional ndarray.
Model prediction for the test dataset.
y_test_pred_lower_total: One dimensional ndarray.
Lower bound of the total uncertainty range.
y_test_pred_upper_total: One dimensional ndarray.
Upper bound of the total uncertainty range.
y_test_pred_lower_epistemic: One dimensional ndarray.
Lower bound of the epistemic uncertainty range.
y_test_pred_upper_epistemic: One dimensional ndarray.
Upper bound of the epistemic uncertainty range.
ax: matplotlib.axes.Axes or None, optional (default=None). Target axes instance. If None, new figure and axes will be created.
figsize: tuple of 2 elements or None, optional (default=None). Figure size.
dpi : int or None, optional (default=None). Resolution of the figure.
xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.set_xlim()``.
xscale: Passed to ``ax.set_xscale()``.
title : string or None, optional
Axes title.
If None, title is disabled.
xlabel : string or None, optional
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional
Y-axis title label.
If None, title is disabled.
Returns:
matplotlib.axes.Axes: ax : The plot with model's uncertainty binned by a feature.
"""
import matplotlib.pyplot as plt
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
x_uniques_sorted = np.sort(np.unique(x_test))
y_pred_var = ((y_test_pred_upper_total - y_test_pred_lower_total) / 4.0)**2
agg_y_std = np.array([np.sqrt(np.mean(y_pred_var[x_test==x])) for x in x_uniques_sorted])
agg_y_mean = np.array([np.mean(y_test_pred_mean[x_test==x]) for x in x_uniques_sorted])
ax.plot(x_uniques_sorted, agg_y_mean, '-b', lw=2, label='mean prediction')
ax.fill_between(x_uniques_sorted,
agg_y_mean - 2.0 * agg_y_std,
agg_y_mean + 2.0 * agg_y_std,
alpha=0.3, label='total uncertainty')
if y_test_pred_lower_epistemic is not None:
y_pred_var_epistemic = ((y_test_pred_upper_epistemic - y_test_pred_lower_epistemic) / 4.0)**2
agg_y_std_epistemic = np.array([np.sqrt(np.mean(y_pred_var_epistemic[x_test==x])) for x in x_uniques_sorted])
ax.fill_between(x_uniques_sorted,
agg_y_mean - 2.0 * agg_y_std_epistemic,
agg_y_mean + 2.0 * agg_y_std_epistemic,
alpha=0.3, label='model uncertainty')
ax.legend(loc='best')
if xlims is not None:
ax.set_xlim(left=xlims[0], right=xlims[1])
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if xscale is not None:
ax.set_xscale(xscale)
return ax
<s> import numpy as np
import pandas as pd
from scipy.stats import entropy
from sklearn.metrics import roc_auc_score, log_loss, accuracy_score
def entropy_based_uncertainty_decomposition(y_prob_samples):
""" Entropy based decomposition [2]_ of predictive uncertainty into aleatoric and epistemic components.
References:
.. [2] Depeweg, S., Hernandez-Lobato, J. M., Doshi-Velez, F., & Udluft, S. (2018, July). Decomposition of
uncertainty in Bayesian deep learning for efficient and risk-sensitive learning. In International Conference
on Machine Learning (pp. 1184-1193). PMLR.
Args:
y_prob_samples: list of array-like of shape (n_samples, n_classes) containing class prediction probabilities
corresponding to samples from the model posterior.
Returns:
tuple:
- total_uncertainty: entropy of the predictive distribution.
- aleatoric_uncertainty: aleatoric component of the total_uncertainty.
- epistemic_uncertainty: epistemic component of the total_uncertainty.
"""
y_preds_samples_stacked = np.stack(y_prob_samples)
preds_mean = np.mean(y_preds_samples_stacked, 0)
total_uncertainty = entropy(preds_mean, axis=1)
aleatoric_uncertainty = np.mean(
np.concatenate([entropy(y_pred, axis=1).reshape(-1, 1) for y_pred in y_prob_samples], axis=1),
axis=1)
epistemic_uncertainty = total_uncertainty - aleatoric_uncertainty
return total_uncertainty, aleatoric_uncertainty, epistemic_uncertainty
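# Sketch (two hypothetical posterior samples): where the samples disagree, the
# epistemic term (total - aleatoric) is strictly positive; where they agree, it is ~0.
def _demo_uncertainty_decomposition():
    sample_a = np.array([[0.9, 0.1], [0.2, 0.8]])
    sample_b = np.array([[0.1, 0.9], [0.2, 0.8]])
    total, aleatoric, epistemic = entropy_based_uncertainty_decomposition([sample_a, sample_b])
    assert epistemic[0] > 0.0             # posterior samples disagree on row 0
    assert np.isclose(epistemic[1], 0.0)  # and agree on row 1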
def multiclass_brier_score(y_true, y_prob):
"""Brier score for multi-class.
Args:
y_true: array-like of shape (n_samples,)
ground truth labels.
y_prob: array-like of shape (n_samples, n_classes).
Probability scores from the base model.
Returns:
float: Brier score.
"""
assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)"
y_target = np.zeros_like(y_prob)
y_target[np.arange(y_prob.shape[0]), y_true] = 1.0  # one-hot encode each sample's label
return np.mean(np.sum((y_target - y_prob) ** 2, axis=1))
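# Sketch (hypothetical data): a perfectly confident, correct prediction scores 0 under
# the Brier score; 2 is the worst attainable value.
def _demo_brier():
    y_true = np.array([0, 1])
    y_prob = np.array([[1.0, 0.0], [0.0, 1.0]])
    assert np.isclose(multiclass_brier_score(y_true, y_prob), 0.0)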
def area_under_risk_rejection_rate_curve(y_true, y_prob, y_pred=None, selection_scores=None, risk_func=accuracy_score,
attributes=None, num_bins=10, subgroup_ids=None,
return_counts=False):
""" Computes risk vs rejection rate curve and the area under this curve. Similar to risk-coverage curves [3]_ where
coverage instead of rejection rate is used.
References:
.. [3] Franc, Vojtech, and Daniel Prusa. "On discriminative learning of prediction uncertainty."
In International Conference on Machine Learning, pp. 1963-1971. 2019.
Args:
y_true: array-like of shape (n_samples,)
ground truth labels.
y_prob: array-like of shape (n_samples, n_classes).
Probability scores from the base model.
y_pred: array-like of shape (n_samples,)
predicted labels.
selection_scores: scores corresponding to certainty in the predicted labels.
risk_func: risk function under consideration.
attributes: (optional) if risk function is a fairness metric also pass the protected attribute name.
num_bins: number of bins.
subgroup_ids: (optional) selectively compute risk on a subgroup of the samples specified by subgroup_ids.
return_counts: set to True to return counts also.
Returns:
float or tuple:
- aurrrc (float): area under risk rejection rate curve.
- rejection_rates (list): rejection rates for each bin (returned only if return_counts is True).
- selection_thresholds (list): selection threshold for each bin (returned only if return_counts is True).
- risks (list): risk in each bin (returned only if return_counts is True).
"""
if selection_scores is None:
assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)"
selection_scores = y_prob[np.arange(y_prob.shape[0]), np.argmax(y_prob, axis=1)]
if y_pred is None:
assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)"
y_pred = np.argmax(y_prob, axis=1)
order = np.argsort(selection_scores)[::-1]
rejection_rates = []
selection_thresholds = []
risks = []
for bin_id in range(num_bins):
samples_in_bin = len(y_true) // num_bins
selection_threshold = selection_scores[order[samples_in_bin * (bin_id+1)-1]]
selection_thresholds.append(selection_threshold)
ids = selection_scores >= selection_threshold
if sum(ids) > 0:
if attributes is None:
if isinstance(y_true, pd.Series):
y_true_numpy = y_true.values
else:
y_true_numpy = y_true
if subgroup_ids is None:
risk_value = 1.0 - risk_func(y_true_numpy[ids], y_pred[ids])
else:
if sum(subgroup_ids & ids) > 0:
risk_value = 1.0 - risk_func(y_true_numpy[subgroup_ids & ids], y_pred[subgroup_ids & ids])
else:
risk_value = 0.0
else:
risk_value = risk_func(y_true.iloc[ids], y_pred[ids], prot_attr=attributes)
else:
risk_value = 0.0
risks.append(risk_value)
rejection_rates.append(1.0 - 1.0 * sum(ids) / len(y_true))
aurrrc = np.nanmean(risks)
if not return_counts:
return aurrrc
else:
return aurrrc, rejection_rates, selection_thresholds, risks
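# Usage sketch (synthetic data): AURRRC from probabilities alone; y_pred and
# selection_scores are derived internally from y_prob when not supplied.
def _demo_aurrrc():
    rng = np.random.default_rng(0)
    y_true = rng.integers(0, 2, size=100)
    y_prob = rng.dirichlet([1.0, 1.0], size=100)
    print(area_under_risk_rejection_rate_curve(y_true, y_prob))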
def expected_calibration_error(y_true, y_prob, y_pred=None, num_bins=10, return_counts=False):
""" Computes the reliability curve and the expected calibration error [1]_ .
References:
.. [1] Chuan Guo, Geoff Pleiss, Yu Sun, Kilian Q. Weinberger; Proceedings of the 34th International Conference
on Machine Learning, PMLR 70:1321-1330, 2017.
Args:
y_true: array-like of shape (n_samples,)
ground truth labels.
y_prob: array-like of shape (n_samples, n_classes).
Probability scores from the base model.
y_pred: array-like of shape (n_samples,)
predicted labels.
num_bins: number of bins.
return_counts: set to True to return counts also.
Returns:
float or tuple:
- ece (float): expected calibration error.
- confidences_in_bins: average confidence in each bin (returned only if return_counts is True).
- accuracies_in_bins: accuracy in each bin (returned only if return_counts is True).
- frac_samples_in_bins: fraction of samples in each bin (returned only if return_counts is True).
"""
assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)"
num_samples, num_classes = y_prob.shape
top_scores = np.max(y_prob, axis=1)
if y_pred is None:
y_pred = np.argmax(y_prob, axis=1)
if num_classes == 2:
bins_edges = np.histogram_bin_edges([], bins=num_bins, range=(0.5, 1.0))
else:
bins_edges = np.histogram_bin_edges([], bins=num_bins, range=(0.0, 1.0))
non_boundary_bin_edges = bins_edges[1:-1]
bin_centers = (bins_edges[1:] + bins_edges[:-1])/2
sample_bin_ids = np.digitize(top_scores, non_boundary_bin_edges)
num_samples_in_bins = np.zeros(num_bins)
accuracies_in_bins = np.zeros(num_bins)
confidences_in_bins = np.zeros(num_bins)
for bin in range(num_bins):
num_samples_in_bins[bin] = len(y_pred[sample_bin_ids == bin])
if num_samples_in_bins[bin] > 0:
accuracies_in_bins[bin] = np.sum(y_true[sample_bin_ids == bin] == y_pred[sample_bin_ids == bin]) / num_samples_in_bins[bin]
confidences_in_bins[bin] = np.sum(top_scores[sample_bin_ids == bin]) / num_samples_in_bins[bin]
ece = np.sum(
num_samples_in_bins * np.abs(accuracies_in_bins - confidences_in_bins) / num_samples
)
frac_samples_in_bins = num_samples_in_bins / num_samples
if not return_counts:
return ece
else:
return ece, confidences_in_bins, accuracies_in_bins, frac_samples_in_bins, bin_centers
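# Sketch: a two-class predictor whose empirical accuracy matches its confidence in
# every bin has (near-)zero ECE.
def _demo_ece():
    y_prob = np.array([[0.9, 0.1]] * 10)  # always 90% confident in class 0
    y_true = np.array([0] * 9 + [1])      # and right 90% of the time
    assert expected_calibration_error(y_true, y_prob) < 0.05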
def compute_classification_metrics(y_true, y_prob, option='all'):
"""
Computes the metrics specified in the option, which can be a string or a list of strings. The default option `all` computes
the [aurrrc, ece, auroc, nll, brier, accuracy] metrics.
Args:
y_true: array-like of shape (n_samples,)
ground truth labels.
y_prob: array-like of shape (n_samples, n_classes).
Probability scores from the base model.
option: string or list of strings containing the names of the metrics to be computed.
Returns:
dict: a dictionary containing the computed metrics.
"""
results = {}
if isinstance(option, list):
option_list = option
elif option == "all":
option_list = ["aurrrc", "ece", "auroc", "nll", "brier", "accuracy"]
else:
option_list = [option]
if "aurrrc" in option_list:
results["aurrrc"] = area_under_risk_rejection_rate_curve(y_true=y_true, y_prob=y_prob)
if "ece" in option_list:
results["ece"] = expected_calibration_error(y_true=y_true, y_prob=y_prob)
if "auroc" in option_list:
results["auroc"], _ = roc_auc_score(y_true=y_true, y_score=y_prob)
if "nll" in option_list:
results["nll"] = log_loss(y_true=y_true, y_pred=np.argmax(y_prob, axis=1))
if "brier" in option_list:
results["brier"] = multiclass_brier_score(y_true=y_true, y_prob=y_prob)
if "accuracy" in option_list:
results["accuracy"] = accuracy_score(y_true=y_true, y_pred=np.argmax(y_prob, axis=1))
return results
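# Usage sketch (synthetic data): compute all supported classification metrics at once.
def _demo_classification_metrics():
    rng = np.random.default_rng(0)
    y_true = rng.integers(0, 2, size=200)
    logits = rng.normal(size=(200, 2))
    y_prob = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
    print(compute_classification_metrics(y_true, y_prob))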
def plot_reliability_diagram(y_true, y_prob, y_pred, plot_label=[""], num_bins=10):
"""
Plots the reliability diagram showing the calibration error for different confidence scores. Multiple curves
can be plotted by passing data as lists.
Args:
y_true: array-like or a list of array-like of shape (n_samples,)
ground truth labels.
y_prob: array-like or a list of array-like of shape (n_samples, n_classes).
Probability scores from the base model.
y_pred: array-like or a list of array-like of shape (n_samples,)
predicted labels.
plot_label: (optional) list of names identifying each curve.
num_bins: number of bins.
Returns:
tuple:
- ece_list: list containing the expected calibration error for each curve.
- accuracies_in_bins_list: list containing binned average accuracies for each curve.
- frac_samples_in_bins_list: list containing binned sample frequencies for each curve.
- confidences_in_bins_list: list containing binned average confidence for each curve.
"""
import matplotlib.pyplot as plt
if not isinstance(y_true, list):
y_true, y_prob, y_pred = [y_true], [y_prob], [y_pred]
if len(plot_label) != len(y_true):
raise ValueError('y_true and plot_label should be of same length.')
ece_list = []
accuracies_in_bins_list = []
frac_samples_in_bins_list = []
confidences_in_bins_list = []
for idx in range(len(plot_label)):
ece, confidences_in_bins, accuracies_in_bins, frac_samples_in_bins, bins = expected_calibration_error(y_true[idx],
y_prob[idx],
y_pred[idx],
num_bins=num_bins,
return_counts=True)
ece_list.append(ece)
accuracies_in_bins_list.append(accuracies_in_bins)
frac_samples_in_bins_list.append(frac_samples_in_bins)
confidences_in_bins_list.append(confidences_in_bins)
fig = plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
for idx in range(len(plot_label)):
plt.plot(bins, frac_samples_in_bins_list[idx], 'o-', label=plot_label[idx])
plt.title("Confidence Histogram")
plt.xlabel("Confidence")
plt.ylabel("Fraction of Samples")
plt.grid()
plt.ylim([0.0, 1.0])
plt.legend()
plt.subplot(1, 2, 2)
for idx in range(len(plot_label)):
plt.plot(bins, accuracies_in_bins_list[idx], 'o-',
label="{} ECE = {:.2f}".format(plot_label[idx], ece_list[idx]))
plt.plot(np.linspace(0, 1, 50), np.linspace(0, 1, 50), 'b.', label="Perfect Calibration")
plt.title("Reliability Plot")
plt.xlabel("Confidence")
plt.ylabel("Accuracy")
plt.grid()
plt.legend()
plt.show()
return ece_list, accuracies_in_bins_list, frac_samples_in_bins_list, confidences_in_bins_list
def plot_risk_vs_rejection_rate(y_true, y_prob, y_pred, selection_scores=None, plot_label=[""], risk_func=None,
attributes=None, num_bins=10, subgroup_ids=None):
"""
Plots the risk vs rejection rate curve showing the risk for different rejection rates. Multiple curves
can be plotted by passing data as lists.
Args:
y_true: array-like or a list of array-like of shape (n_samples,)
ground truth labels.
y_prob: array-like or a list of array-like of shape (n_samples, n_classes).
Probability scores from the base model.
y_pred: array-like or a list of array-like of shape (n_samples,)
predicted labels.
selection_scores: ndarray or a list of ndarray containing scores corresponding to certainty in the predicted labels.
risk_func: risk function under consideration.
attributes: (optional) if risk function is a fairness metric also pass the protected attribute name.
num_bins: number of bins.
subgroup_ids: (optional) ndarray or a list of ndarray containing subgroup_ids to selectively compute risk on a
subgroup of the samples specified by subgroup_ids.
Returns:
tuple:
- aurrrc_list: list containing the area under risk rejection rate curves.
- rejection_rate_list: list containing the binned rejection rates.
- selection_thresholds_list: list containing the binned selection thresholds.
- risk_list: list containing the binned risks.
"""
import matplotlib.pyplot as plt
if not isinstance(y_true, list):
y_true, y_prob, y_pred, selection_scores, subgroup_ids = [y_true], [y_prob], [y_pred], [selection_scores], [subgroup_ids]
if len(plot_label) != len(y_true):
raise ValueError('y_true and plot_label should be of same length.')
aurrrc_list = []
rejection_rate_list = []
risk_list = []
selection_thresholds_list = []
for idx in range(len(plot_label)):
aurrrc, rejection_rates, selection_thresholds, risks = area_under_risk_rejection_rate_curve(
y_true[idx],
y_prob[idx],
y_pred[idx],
selection_scores=selection_scores[idx],
risk_func=risk_func,
attributes=attributes,
num_bins=num_bins,
subgroup_ids=subgroup_ids[idx],
return_counts=True
)
aurrrc_list.append(aurrrc)
rejection_rate_list.append(rejection_rates)
risk_list.append(risks)
selection_thresholds_list.append(selection_thresholds)
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
for idx in range(len(plot_label)):
plt.plot(rejection_rate_list[idx], risk_list[idx], label="{} AURRRC={:.5f}".format(plot_label[idx], aurrrc_list[idx]))
plt.legend(loc="best")
plt.xlabel("Rejection Rate")
if risk_func is None:
ylabel = "Prediction Error Rate"
else:
if 'accuracy' in risk_func.__name__:
ylabel = "1.0 - " + risk_func.__name__
else:
ylabel = risk_func.__name__
plt.ylabel(ylabel)
plt.title("Risk vs Rejection Rate Plot")
plt.grid()
plt.subplot(1, 2, 2)
for idx in range(len(plot_label)):
plt.plot(selection_thresholds_list[idx], risk_list[idx], label="{}".format(plot_label[idx]))
plt.legend(loc="best")
plt.xlabel("Selection Threshold")
if risk_func is None:
ylabel = "Prediction Error Rate"
else:
if 'accuracy' in risk_func.__name__:
ylabel = "1.0 - " + risk_func.__name__
else:
ylabel = risk_func.__name__
plt.ylabel(ylabel)
plt.title("Risk vs Selection Threshold Plot")
plt.grid()
plt.show()
return aurrrc_list, rejection_rate_list, selection_thresholds_list, risk_list
<s> from .classification_metrics import expected_calibration_error, area_under_risk_rejection_rate_curve, \
compute_classification_metrics, entropy_based_uncertainty_decomposition
from .regression_metrics import picp, mpiw, compute_regression_metrics, plot_uncertainty_distribution, \
plot_uncertainty_by_feature, plot_picp_by_feature
from .uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve
<s> from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import simps, trapz
from sklearn.isotonic import IsotonicRegression
DEFAULT_X_AXIS_NAME = 'excess'
DEFAULT_Y_AXIS_NAME = 'missrate'
class UncertaintyCharacteristicsCurve:
"""
Class with main functions of the Uncertainty Characteristics Curve (UCC).
"""
def __init__(self, normalize=True, precompute_bias_data=True):
"""
:param normalize: set initial axes normalization flag (can be changed via set_coordinates())
:param precompute_bias_data: if True, fit() will compute statistics necessary to generate bias-based
UCCs (in addition to the scale-based ones). Skipping this precomputation may speed up the fit() call
if bias-based UCC is not needed.
"""
self.axes_name2idx = {"missrate": 1, "bandwidth": 2, "excess": 3, "deficit": 4}
self.axes_idx2descr = {1: "Missrate", 2: "Bandwidth", 3: "Excess", 4: "Deficit"}
self.x_axis_idx = None
self.y_axis_idx = None
self.norm_x_axis = False
self.norm_y_axis = False
self.std_unit = None
self.normalize = normalize
self.d = None
self.gt = None
self.lb = None
self.ub = None
self.precompute_bias_data = precompute_bias_data
self.set_coordinates(x_axis_name=DEFAULT_X_AXIS_NAME, y_axis_name=DEFAULT_Y_AXIS_NAME, normalize=normalize)
def set_coordinates(self, x_axis_name=None, y_axis_name=None, normalize=None):
"""
Assigns user-specified type to the axes and normalization behavior (sticky).
:param x_axis_name: None-> unchanged, or name from self.axes_name2idx
:param y_axis_name: ditto
:param normalize: True/False activates/deactivates normalization for the specified axes; None leaves the
normalization status unchanged. Axes whose name argument is None are not affected.
Note: the 'missrate' axis is never normalized, even with normalize == True
:return: none
"""
normalize = self.normalize if normalize is None else normalize
if x_axis_name is None and self.x_axis_idx is None:
raise ValueError("ERROR(UCC): x-axis has not been defined.")
if y_axis_name is None and self.y_axis_idx is None:
raise ValueError("ERROR(UCC): y-axis has not been defined.")
if x_axis_name is None and y_axis_name is None and normalize is not None:
# just set normalization on/off for both axes and return
self.norm_x_axis = False if self.x_axis_idx == self.axes_name2idx['missrate'] else normalize
self.norm_y_axis = False if self.y_axis_idx == self.axes_name2idx['missrate'] else normalize
return
if x_axis_name is not None:
self.x_axis_idx = self.axes_name2idx[x_axis_name]
self.norm_x_axis = False if x_axis_name == 'missrate' else normalize
if y_axis_name is not None:
self.y_axis_idx = self.axes_name2idx[y_axis_name]
self.norm_y_axis = False if y_axis_name == 'missrate' else normalize
def set_std_unit(self, std_unit=None):
"""
Sets the UCC's unit to be used when displaying normalized axes.
:param std_unit: if None, the unit will be calculated as stddev of the ground truth data
(ValueError raised if data has not been set at this point)
or set to the user-specified value.
:return:
"""
if std_unit is None: # set it to stddev of data
if self.gt is None:
raise ValueError("ERROR(UCC): No data specified - cannot set stddev unit.")
self.std_unit = np.std(self.gt)
if np.isclose(self.std_unit, 0.):
print("WARN(UCC): data-based stddev is zero - resetting axes unit to 1.")
self.std_unit = 1.
else:
self.std_unit = float(std_unit)
def fit(self, X, gt):
"""
Calculates internal arrays necessary for other methods (plotting, auc, cost minimization).
Re-entrant.
:param X: [numsamples, 3] numpy matrix, or list of numpy matrices.
Col 1: predicted values
Col 2: lower band (deviate) wrt predicted value (always positive)
Col 3: upper band wrt predicted value (always positive)
If list is provided, all methods will output corresponding metrics as lists as well!
:param gt: Ground truth array (i.e., the 'actual' values corresponding to predictions in X)
:return: self
"""
if not isinstance(X, list):
X = [X]
newX = []
for x in X:
assert (isinstance(x, np.ndarray) and len(x.shape) == 2 and x.shape[1] == 3 and x.shape[0] == len(gt))
newX.append(self._sanitize_input(x))
self.d = [gt - x[:, 0] for x in newX]
self.lb = [x[:, 1] for x in newX]
self.ub = [x[:, 2] for x in newX]
self.gt = gt
self.set_std_unit()
self.plotdata_for_scale = []
self.plotdata_for_bias = []
# precompute plotdata:
for i in range(len(self.d)):
self.plotdata_for_scale.append(self._calc_plotdata(self.d[i], self.lb[i], self.ub[i], vary_bias=False))
if self.precompute_bias_data:
self.plotdata_for_bias.append(self._calc_plotdata(self.d[i], self.lb[i], self.ub[i], vary_bias=True))
return self
def minimize_cost(self, x_axis_cost=.5, y_axis_cost=.5, augment_cost_by_normfactor=True,
search=('scale', 'bias')):
"""
Find minima of a linear cost function for each component.
Cost function C = x_axis_cost * x_axis_value + y_axis_cost * y_axis_value.
A minimum can occur in the scale-based or bias-based UCC (this can be constrained by the 'search' arg).
The function returns a 'recipe' how to achieve the corresponding minimum, for each component.
:param x_axis_cost: weight of one unit on x_axis
:param y_axis_cost: weight of one unit on y_axis
:param augment_cost_by_normfactor: when False, the cost multipliers will apply as is. If True, they will be
pre-normed by the corresponding axis norm (where applicable), to account for range differences between axes.
:param search: list of types over which minimization is to be performed, valid elements are 'scale' and 'bias'.
:return: list of dicts - one per component, or a single dict, if there is only one component. Dict keys are -
'operation': can be 'bias' (additive) or 'scale' (multiplicative), 'modvalue': value to multiply by or to
add to error bars to achieve the minimum, 'new_x'/'new_y': new coordinates (operating point) with that
minimum, 'cost': new cost at minimum point, 'original_cost': original cost (original operating point).
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
if augment_cost_by_normfactor:
if self.norm_x_axis:
x_axis_cost /= self.std_unit
if self.norm_y_axis:
y_axis_cost /= self.std_unit
print("INFO(UCC): Pre-norming costs by corresp. std deviation: new x_axis_cost = %.4f, y_axis_cost = %.4f" %
(x_axis_cost, y_axis_cost))
if isinstance(search, tuple):
search = list(search)
if not isinstance(search, list):
search = [search]
min_costs = []
for d in range(len(self.d)):
# original OP cost
m, b, e, df = self._calc_missrate_bandwidth_excess_deficit(self.d[d], self.lb[d], self.ub[d])
original_cost = x_axis_cost * [0., m, b, e, df][self.x_axis_idx] + y_axis_cost * [0., m, b, e, df][
self.y_axis_idx]
plotdata = self.plotdata_for_scale[d]
cost_scale, minidx_scale = self._find_min_cost_in_component(plotdata, self.x_axis_idx, self.y_axis_idx,
x_axis_cost, y_axis_cost)
mcf_scale_multiplier = plotdata[minidx_scale][0]
mcf_scale_x = plotdata[minidx_scale][self.x_axis_idx]
mcf_scale_y = plotdata[minidx_scale][self.y_axis_idx]
if 'bias' in search:
if not self.precompute_bias_data:
raise ValueError(
"ERROR(UCC): Cannot perform minimization - instantiated without bias data computation")
plotdata = self.plotdata_for_bias[d]
cost_bias, minidx_bias = self._find_min_cost_in_component(plotdata, self.x_axis_idx, self.y_axis_idx,
x_axis_cost, y_axis_cost)
mcf_bias_add = plotdata[minidx_bias][0]
mcf_bias_x = plotdata[minidx_bias][self.x_axis_idx]
mcf_bias_y = plotdata[minidx_bias][self.y_axis_idx]
if 'bias' in search and 'scale' in search:
if cost_bias < cost_scale:
min_costs.append({'operation': 'bias', 'cost': cost_bias, 'modvalue': mcf_bias_add,
'new_x': mcf_bias_x, 'new_y': mcf_bias_y, 'original_cost': original_cost})
else:
min_costs.append({'operation': 'scale', 'cost': cost_scale, 'modvalue': mcf_scale_multiplier,
'new_x': mcf_scale_x, 'new_y': mcf_scale_y, 'original_cost': original_cost})
elif 'scale' in search:
min_costs.append({'operation': 'scale', 'cost': cost_scale, 'modvalue': mcf_scale_multiplier,
'new_x': mcf_scale_x, 'new_y': mcf_scale_y, 'original_cost': original_cost})
elif 'bias' in search:
min_costs.append({'operation': 'bias', 'cost': cost_bias, 'modvalue': mcf_bias_add,
'new_x': mcf_bias_x, 'new_y': mcf_bias_y, 'original_cost': original_cost})
else:
raise ValueError("(ERROR): Unknown search element (%s) requested." % ",".join(search))
if len(min_costs) < 2:
return min_costs[0]
else:
return min_costs
def get_specific_operating_point(self, req_x_axis_value=None, req_y_axis_value=None,
req_critical_value=None, vary_bias=False):
"""
Finds corresponding operating point on the current UCC, given a point on either x or y axis. Returns
a list of recipes how to achieve the point (x,y), for each component. If there is only one component,
returns a single recipe dict.
:param req_x_axis_value: requested x value on UCC (normalization status is taken from current display)
:param req_y_axis_value: requested y value on UCC (normalization status is taken from current display)
:param req_critical_value: requested critical value (scale or bias) generating the operating point
:param vary_bias: set to True when referring to bias-induced UCC (scale UCC default)
:return: list of dicts (recipes), or a single dict
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
if np.sum([req_x_axis_value is not None, req_y_axis_value is not None, req_critical_value is not None]) != 1:
raise ValueError("ERROR(UCC): exactly one axis value must be requested at a time.")
if vary_bias and not self.precompute_bias_data:
raise ValueError("ERROR(UCC): Cannot vary bias - instantiated without bias data computation")
xnorm = self.std_unit if self.norm_x_axis else 1.
ynorm = self.std_unit if self.norm_y_axis else 1.
recipe = []
for dc in range(len(self.d)):
plotdata = self.plotdata_for_bias[dc] if vary_bias else self.plotdata_for_scale[dc]
if req_x_axis_value is not None:
tgtidx = self.x_axis_idx
req_value = req_x_axis_value * xnorm
elif req_y_axis_value is not None:
tgtidx = self.y_axis_idx
req_value = req_y_axis_value * ynorm
elif req_critical_value is not None:
req_value = req_critical_value
tgtidx = 0  # first element in plotdata is always the critical value (scale or bias)
else:
raise RuntimeError("Unhandled case")
closestidx = np.argmin(np.asarray([np.abs(p[tgtidx] - req_value) for p in plotdata]))
recipe.append({'operation': ('bias' if vary_bias else 'scale'),
'modvalue': plotdata[closestidx][0],
'new_x': plotdata[closestidx][self.x_axis_idx] / xnorm,
'new_y': plotdata[closestidx][self.y_axis_idx] / ynorm})
if len(recipe) < 2:
return recipe[0]
else:
return recipe
def _find_min_cost_in_component(self, plotdata, idx1, idx2, cost1, cost2):
"""
Finds the minimum cost function value and the corresponding position index in plotdata
:param plotdata: list of tuples
:param idx1: idx of x-axis item within the tuple
:param idx2: idx of y-axis item within the tuple
:param cost1: cost factor for x-axis unit
:param cost2: cost factor for y-axis unit
:return: min cost value, index within plotdata where minimum occurs
"""
raw = [cost1 * i[idx1] + cost2 * i[idx2] for i in plotdata]
minidx = np.argmin(raw)
return raw[minidx], minidx
def _sanitize_input(self, x):
"""
Replaces problematic values in input data (e.g., zero error bars)
:param x: single matrix of input data [n, 3]
:return: sanitized version of x
"""
if np.isclose(np.sum(x[:, 1]), 0.):
raise ValueError("ERROR(UCC): Provided lower bands are all zero.")
if np.isclose(np.sum(x[:, 2]), 0.):
raise ValueError("ERROR(UCC): Provided upper bands are all zero.")
for i in [1, 2]:
if any(np.isclose(x[:, i], 0.)):
print("WARN(UCC): some band values are 0. - REPLACING with positive minimum")
m = np.min(x[x[:, i] > 0, i])
x = np.where(np.isclose(x, 0.), m, x)
return x
def _calc_avg_excess(self, d, lb, ub):
"""
Excess is the amount by which an error bar overshoots the actual value
:param d: pred-actual array
:param lb: lower band
:param ub: upper band
:return: average excess over array
"""
excess = np.zeros(d.shape)
posidx = np.where(d >= 0)[0]
excess[posidx] = np.where(ub[posidx] - d[posidx] < 0., 0., ub[posidx] - d[posidx])
negidx = np.where(d < 0)[0]
excess[negidx] = np.where(lb[negidx] + d[negidx] < 0., 0., lb[negidx] + d[negidx])
return np.mean(excess)
def _calc_avg_deficit(self, d, lb, ub):
"""
Deficit is the error bar insufficiency: the bar falls short of the actual value
:param d: pred-actual array
:param lb: lower band
:param ub: upper band
:return: average deficit over array
"""
deficit = np.zeros(d.shape)
posidx = np.where(d >= 0)[0]
deficit[posidx] = np.where(- ub[posidx] + d[posidx] < 0., 0., - ub[posidx] + d[posidx])
negidx = np.where(d < 0)[0]
deficit[negidx] = np.where(- lb[negidx] - d[negidx] < 0., 0., - lb[negidx] - d[negidx])
return np.mean(deficit)
def _calc_missrate_bandwidth_excess_deficit(self, d, lb, ub, scale=1.0, bias=0.0):
"""
Calculates miss rate, average bandwidth, average excess and average deficit at a given scale/bias
:param d: delta
:param lb: lower band
:param ub: upper band
:param scale: scale * (x + bias)
:param bias:
:return: miss rate, average bandwidth, avg excess, avg deficit
"""
abslband = scale * np.where((lb + bias) < 0., 0., lb + bias)
absuband = scale * np.where((ub + bias) < 0., 0., ub + bias)
recall = np.sum((d >= - abslband) & (d <= absuband)) / len(d)
avgbandwidth = np.mean([absuband, abslband])
avgexcess = self._calc_avg_excess(d, abslband, absuband)
avgdeficit = self._calc_avg_deficit(d, abslband, absuband)
return 1 - recall, avgbandwidth, avgexcess, avgdeficit
def _calc_plotdata(self, d, lb, ub, vary_bias=False):
"""
Generates data necessary for various UCC metrics.
:param d: delta (predicted - actual) vector
:param ub: upper uncertainty bandwidth (above predicted)
:param lb: lower uncertainty bandwidth (below predicted) - all positive (bandwidth)
:param vary_bias: True will switch to additive bias instead of scale
:return: list. Elements are tuples (varyvalue, missrate, bandwidth, excess, deficit)
"""
# step 1: collect critical scale or bias values
critval = []
for i in range(len(d)):
if not vary_bias:
if d[i] >= 0:
critval.append(d[i] / ub[i])
else:
critval.append(-d[i] / lb[i])
else:
if d[i] >= 0:
critval.append(d[i] - ub[i])
else:
critval.append(-lb[i] - d[i])
critval = sorted(critval)
plotdata = []
for i in range(len(critval)):
if not vary_bias:
missrate, bandwidth, excess, deficit = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub,
scale=critval[i])
else:
missrate, bandwidth, excess, deficit = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub,
bias=critval[i])
plotdata.append((critval[i], missrate, bandwidth, excess, deficit))
return plotdata
def get_AUUCC(self, vary_bias=False, aucfct="trapz", partial_x=None, partial_y=None):
"""
Returns the approximate area under the curve on current coordinates, for each component.
:param vary_bias: False == varies scale, True == varies bias
:param aucfct: specifies AUC integrator (can be "trapz", "simps")
:param partial_x: tuple (x_min, x_max) defining interval on x to calc a partial AUC.
The interval bounds refer to axes as visualized (i.e. potentially normed)
:param partial_y: tuple (y_min, y_max) defining interval on y to calc a partial AUC. partial_x must be None.
:return: list of floats with AUUCCs for each input component, or a single float, if there is only 1 component.
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
if vary_bias and not self.precompute_bias_data:
raise ValueError("ERROR(UCC): Cannot vary bias - instantiated without bias data computation")
if partial_x is not None and partial_y is not None:
raise ValueError("ERROR(UCC): partial_x and partial_y can not be specified at the same time.")
assert(partial_x is None or (isinstance(partial_x, tuple) and len(partial_x)==2))
assert(partial_y is None or (isinstance(partial_y, tuple) and len(partial_y)==2))
# find starting point (where the x axis value starts to actually change)
rv = []
# do this for individual streams
xind = self.x_axis_idx
aucfct = simps if aucfct == "simps" else trapz
for s in range(len(self.d)):
plotdata = self.plotdata_for_bias[s] if vary_bias else self.plotdata_for_scale[s]
prev = plotdata[0][xind]
t = 1
cval = plotdata[t][xind]
while cval == prev and t < len(plotdata) - 1:
t += 1
prev = cval
cval = plotdata[t][xind]
startt = t - 1 # from here, it's a valid function
endtt = len(plotdata)
if startt >= endtt - 2:
rvs = 0. # no area
else:
xnorm = self.std_unit if self.norm_x_axis else 1.
ynorm = self.std_unit if self.norm_y_axis else 1.
y=[(plotdata[i][self.y_axis_idx]) / ynorm for i in range(startt, endtt)]
x=[(plotdata[i][self.x_axis_idx]) / xnorm for i in range(startt, endtt)]
if partial_x is not None:
from_i = self._find_closest_index(partial_x[0], x)
to_i = self._find_closest_index(partial_x[1], x) + 1
elif partial_y is not None:
from_i = self._find_closest_index(partial_y[0], y)
to_i = self._find_closest_index(partial_y[1], y)
if from_i > to_i: # y is in reverse order
from_i, to_i = to_i, from_i
to_i += 1 # as upper bound in array indexing
else:
from_i = 0
to_i = len(x)
to_i = min(to_i, len(x))
if to_i < from_i:
raise ValueError("ERROR(UCC): Failed to find an appropriate partial-AUC interval in the data.")
if to_i - from_i < 2:
raise RuntimeError("ERROR(UCC): There are too few samples (1) in the partial-AUC interval specified")
rvs = aucfct(x=x[from_i:to_i], y=y[from_i:to_i])
rv.append(rvs)
if len(rv) < 2:
return rv[0]
else:
return rv
@staticmethod
def _find_closest_index(value, array):
"""
Returns an index of the 'array' element closest in value to 'value'
:param value:
:param array:
:return:
"""
return np.argmin(np.abs(np.asarray(array)-value))
def _get_single_OP(self, d, lb, ub, scale=1., bias=0.):
"""
Returns Operating Point for original input data, on coordinates currently set up, given a scale/bias.
:param scale:
:param bias:
:return: single tuple (x point, y point, unit of x, unit of y)
"""
xnorm = self.std_unit if self.norm_x_axis else 1.
ynorm = self.std_unit if self.norm_y_axis else 1.
auxop = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub, scale=scale, bias=bias)
op = [0.] + [i for i in auxop] # mimic plotdata (first element ignored here)
return (op[self.x_axis_idx] / xnorm, op[self.y_axis_idx] / ynorm, xnorm, ynorm)
def get_OP(self, scale=1., bias=0.):
"""
Returns all Operating Points for original input data, on coordinates currently set up, given a scale/bias.
:param scale:
:param bias:
:return: list of tuples (x point, y point, unit of x, unit of y) or a single tuple if there is only
1 component.
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
op = []
for dc in range(len(self.d)):
op.append(self._get_single_OP(self.d[dc], self.lb[dc], self.ub[dc], scale=scale, bias=bias))
if len(op) < 2:
return op[0]
else:
return op
def plot_UCC(self, titlestr='', syslabel='model', outfn=None, vary_bias=False, markers=None,
xlim=None, ylim=None, **kwargs):
""" Will plot/display the UCC based on current data and coordinates. Multiple curves will be shown
if there are multiple data components (via fit())
:param titlestr: Plot title string
:param syslabel: list of label strings to appear in the plot legend. Can be a single string, if one component.
:param outfn: base name of an image file to be created (will append .png before creating)
:param vary_bias: True will switch to varying additive bias (default is multiplicative scale)
:param markers: None or a list of marker styles to be used for each curve.
List must be same or longer than number of components.
Markers can be one among these ['o', 's', 'v', 'D', '+'].
:param xlim: tuple or list specifying the range for the x axis, or None (auto)
:param ylim: tuple or list specifying the range for the y axis, or None (auto)
:param `**kwargs`: Additional arguments passed to the main plot call.
:return: list of areas under the curve (or single area, if one data component)
list of operating points (or single op): format of an op is tuple (xaxis value, yaxis value, xunit, yunit)
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
if vary_bias and not self.precompute_bias_data:
raise ValueError("ERROR(UCC): Cannot vary bias - instantiated without bias data computation")
if not isinstance(syslabel, list):
syslabel = [syslabel]
assert (len(syslabel) == len(self.d))
assert (markers is None or (isinstance(markers, list) and len(markers) >= len(self.d)))
# main plot of (possibly multiple) datasets
plt.figure()
xnorm = self.std_unit if self.norm_x_axis else 1.
ynorm = self.std_unit if self.norm_y_axis else 1.
op_info = []
auucc = self.get_AUUCC(vary_bias=vary_bias)
auucc = [auucc] if not isinstance(auucc, list) else auucc
for s in range(len(self.d)):
# original operating point
x_op, y_op, x_unit, y_unit = self._get_single_OP(self.d[s], self.lb[s], self.ub[s])
op_info.append((x_op, y_op, x_unit, y_unit))
# display chart
plotdata = self.plotdata_for_scale[s] if not vary_bias else self.plotdata_for_bias[s]
axisX_data = [i[self.x_axis_idx] / xnorm for i in plotdata]
axisY_data = [i[self.y_axis_idx] / ynorm for i in plotdata]
marker = None
if markers is not None: marker = markers[s]
p = plt.plot(axisX_data, axisY_data, label=syslabel[s] + (" (AUC=%.3f)" % auucc[s]), marker=marker, **kwargs)
if s + 1 == len(self.d):
oplab = 'OP'
else:
oplab = None
plt.plot(x_op, y_op, marker='o', color=p[0].get_color(), label=oplab, markerfacecolor='w',
markeredgewidth=1.5, markeredgecolor=p[0].get_color())
axisX_label = self.axes_idx2descr[self.x_axis_idx]
axisY_label = self.axes_idx2descr[self.y_axis_idx]
axisX_units = "(raw)" if np.isclose(xnorm, 1.0) else "[in std deviations]"
axisY_units = "(raw)" if np.isclose(ynorm, 1.0) else "[in std deviations]"
axisX_label += ' ' + axisX_units
axisY_label += ' ' + axisY_units
if ylim is not None:
plt.ylim(ylim)
if xlim is not None:
plt.xlim(xlim)
plt.xlabel(axisX_label)
plt.ylabel(axisY_label)
plt.legend()
plt.title(titlestr)
plt.grid()
if outfn is None:
plt.show()
else:
plt.savefig(outfn)
if len(auucc) < 2:
auucc = auucc[0]
op_info = op_info[0]
return auucc, op_info
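if __name__ == "__main__":
    # Usage sketch (illustrative only). It mirrors how UCCRecalibration in
    # this codebase drives the class: form_D_for_auucc (uq360.utils.misc)
    # packs (mean, lower bandwidth, upper bandwidth) into the structure
    # expected by fit(); the second fit() argument is the ground truth.
    from uq360.utils.misc import form_D_for_auucc
    rng = np.random.RandomState(0)
    gt = rng.randn(200)
    pred_mean = gt + 0.1 * rng.randn(200)
    bw = np.full(200, 0.5)  # symmetric interval half-widths
    ucc = UncertaintyCharacteristicsCurve()
    ucc.fit(form_D_for_auucc(pred_mean, bw, bw), gt)
    print("AUUCC:", ucc.get_AUUCC())
    ucc.plot_UCC(titlestr='UCC demo', syslabel='toy model')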
<s> from .uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve
<s> import abc
import sys
# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta(str('ABC'), (), {})
class BuiltinUQ(ABC):
""" BuiltinUQ is the base class for any algorithm that has UQ built into it.
"""
def __init__(self, *argv, **kwargs):
""" Initialize a BuiltinUQ object.
"""
@abc.abstractmethod
def fit(self, *argv, **kwargs):
""" Learn the UQ related parameters..
"""
raise NotImplementedError
@abc.abstractmethod
def predict(self, *argv, **kwargs):
""" Method to obtain the predicitve uncertainty, this can return the total, epistemic and/or aleatoric
uncertainty in the predictions.
"""
raise NotImplementedError
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
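if __name__ == "__main__":
    # Minimal sketch of a concrete subclass (hypothetical, illustration only):
    # an algorithm plugs into the framework by implementing fit() and predict().
    class _ConstantWidthUQ(BuiltinUQ):
        """Predicts the training mean with a fixed-width uncertainty band."""
        def __init__(self, width=1.0):
            super(_ConstantWidthUQ, self).__init__()
            self.width = width
        def fit(self, X, y):
            self.mean_ = sum(y) / float(len(y))
            return self
        def predict(self, X):
            # (mean, lower, upper) per test point
            return [(self.mean_, self.mean_ - self.width, self.mean_ + self.width)
                    for _ in X]
    mdl = _ConstantWidthUQ(width=0.5).fit([[0.0], [1.0]], [1.0, 3.0])
    print(mdl.predict([[2.0]]))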
<s> import abc
import sys
# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta(str('ABC'), (), {})
class PostHocUQ(ABC):
""" PostHocUQ is the base class for any algorithm that quantifies uncertainty of a pre-trained model.
"""
def __init__(self, *argv, **kwargs):
""" Initialize a BuiltinUQ object.
"""
@abc.abstractmethod
def _process_pretrained_model(self, *argv, **kwargs):
""" Method to process the pretrained model that requires UQ.
"""
raise NotImplementedError
@abc.abstractmethod
def predict(self, *argv, **kwargs):
""" Method to obtain the predicitve uncertainty, this can return the total, epistemic and/or aleatoric
uncertainty in the predictions.
"""
raise NotImplementedError
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
def get_params(self):
"""
This method should not take any arguments and returns a dict of the __init__ parameters.
"""
raise NotImplementedError
<s><s> from collections import namedtuple
import numpy as np
import torch
from scipy.stats import norm
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from uq360.algorithms.builtinuq import BuiltinUQ
from uq360.models.heteroscedastic_mlp import GaussianNoiseMLPNet as _MLPNet
np.random.seed(42)
torch.manual_seed(42)
class HeteroscedasticRegression(BuiltinUQ):
""" Wrapper for heteroscedastic regression. We learn to predict targets given features,
assuming that the targets are noisy and that the amount of noise varies between data points.
https://en.wikipedia.org/wiki/Heteroscedasticity
"""
def __init__(self, model_type=None, model=None, config=None, device=None, verbose=True):
"""
Args:
model_type: The base model architecture. Currently supported values are [mlp].
                mlp model type learns a multi-layer perceptron with a heteroscedastic Gaussian likelihood. Both the
                mean and variance of the Gaussian are functions of the data point -> N(y_n | mlp_mu(x_n), mlp_var(x_n))
model: (optional) The prediction model. Currently support pytorch models that returns mean and log variance.
config: dictionary containing the config parameters for the model.
device: device used for pytorch models ignored otherwise.
verbose: if True, print statements with the progress are enabled.
"""
        super(HeteroscedasticRegression, self).__init__()
self.config = config
self.device = device
self.verbose = verbose
if model_type == "mlp":
self.model_type = model_type
self.model = _MLPNet(
num_features=self.config["num_features"],
num_outputs=self.config["num_outputs"],
num_hidden=self.config["num_hidden"],
)
elif model_type == "custom":
self.model_type = model_type
self.model = model
else:
raise NotImplementedError
def get_params(self, deep=True):
return {"model_type": self.model_type, "config": self.config, "model": self.model,
"device": self.device, "verbose": self.verbose}
def _loss(self, y_true, y_pred_mu, y_pred_log_var):
return torch.mean(0.5 * torch.exp(-y_pred_log_var) * torch.abs(y_true - y_pred_mu) ** 2 +
0.5 * y_pred_log_var)
def fit(self, X, y):
""" Fit the Heteroscedastic Regression model.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
X = torch.from_numpy(X).float().to(self.device)
y = torch.from_numpy(y).float().to(self.device)
dataset_loader = DataLoader(
TensorDataset(X,y),
batch_size=self.config["batch_size"]
)
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.config["lr"])
for epoch in range(self.config["num_epochs"]):
avg_loss = 0.0
for batch_x, batch_y in dataset_loader:
self.model.train()
batch_y_pred_mu, batch_y_pred_log_var = self.model(batch_x)
loss = self.model.loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var)
optimizer.zero_grad()
loss.backward()
optimizer.step()
avg_loss += loss.item()/len(dataset_loader)
if self.verbose:
print("Epoch: {}, loss = {}".format(epoch, avg_loss))
return self
def predict(self, X, return_dists=False):
"""
Obtain predictions for the test points.
In addition to the mean and lower/upper bounds, also returns epistemic uncertainty (return_epistemic=True)
and full predictive distribution (return_dists=True).
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
Returns:
            namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
Only returned when `return_dists` is True.
"""
self.model.eval()
X = torch.from_numpy(X).float().to(self.device)
dataset_loader = DataLoader(
X,
batch_size=self.config["batch_size"]
)
y_mean_list = []
y_log_var_list = []
for batch_x in dataset_loader:
batch_y_pred_mu, batch_y_pred_log_var = self.model(batch_x)
y_mean_list.append(batch_y_pred_mu.data.cpu().numpy())
y_log_var_list.append(batch_y_pred_log_var.data.cpu().numpy())
y_mean = np.concatenate(y_mean_list)
y_log_var = np.concatenate(y_log_var_list)
y_std = np.sqrt(np.exp(y_log_var))
y_lower = y_mean - 2.0*y_std
y_upper = y_mean + 2.0*y_std
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
if return_dists:
dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_dists',))
res = Result(*res, y_dists=dists)
return res
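if __name__ == "__main__":
    # Usage sketch (illustrative only); config values are arbitrary and the
    # keys are the ones read by this class. Assumes the bundled
    # GaussianNoiseMLPNet, which provides the loss() used in fit().
    rng = np.random.RandomState(0)
    X = rng.randn(128, 3).astype('float32')
    y = (X.sum(axis=1, keepdims=True) + 0.1 * rng.randn(128, 1)).astype('float32')
    cfg = {"num_features": 3, "num_outputs": 1, "num_hidden": 16,
           "batch_size": 32, "lr": 1e-2, "num_epochs": 5}
    mdl = HeteroscedasticRegression(model_type="mlp", config=cfg, device="cpu", verbose=False)
    res = mdl.fit(X, y).predict(X[:5])
    print(res.y_mean.shape, res.y_lower.shape, res.y_upper.shape)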
<s> from .heteroscedastic_regression import HeteroscedasticRegression<s> from collections import namedtuple
import numpy as np
from sklearn.calibration import CalibratedClassifierCV
from sklearn.preprocessing import LabelEncoder
from uq360.utils.misc import DummySklearnEstimator
from uq360.algorithms.posthocuq import PostHocUQ
class ClassificationCalibration(PostHocUQ):
"""Post hoc calibration of classification models. Currently wraps `CalibratedClassifierCV` from sklearn and allows
non-sklearn models to be calibrated.
"""
def __init__(self, num_classes, fit_mode="features", method='isotonic', base_model_prediction_func=None):
"""
Args:
num_classes: number of classes.
            fit_mode: features or probs. If 'probs', `fit` and `predict` operate on the base model's probability
                scores, which is useful when these are precomputed.
method: isotonic or sigmoid.
base_model_prediction_func: the function that takes in the input features and produces base model's
probability scores. This is ignored when operating in `probs` mode.
"""
        super(ClassificationCalibration, self).__init__()
        # stored for get_params()
        self.num_classes = num_classes
        self.fit_mode = fit_mode
        self.base_model_prediction_func = base_model_prediction_func
if fit_mode == "probs":
# In this case, the fit assumes that it receives the probability scores of the base model.
# create a dummy estimator
self.base_model = DummySklearnEstimator(num_classes, lambda x: x)
else:
self.base_model = DummySklearnEstimator(num_classes, base_model_prediction_func)
self.method = method
def get_params(self, deep=True):
return {"num_classes": self.num_classes, "fit_mode": self.fit_mode, "method": self.method,
"base_model_prediction_func": self.base_model_prediction_func}
def _process_pretrained_model(self, base_model):
return base_model
def fit(self, X, y):
""" Fits calibration model using the provided calibration set.
Args:
X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
Features vectors of the training data or the probability scores from the base model.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
self.base_model.label_encoder_ = LabelEncoder().fit(y)
self.calib_model = CalibratedClassifierCV(base_estimator=self.base_model,
cv="prefit",
method=self.method)
self.calib_model.fit(X, y)
return self
def predict(self, X):
"""
Obtain calibrated predictions for the test points.
Args:
X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
Features vectors of the training data or the probability scores from the base model.
Returns:
            namedtuple: A namedtuple that holds
y_pred: ndarray of shape (n_samples,)
Predicted labels of the test points.
y_prob: ndarray of shape (n_samples, n_classes)
Predicted probability scores of the classes.
"""
y_prob = self.calib_model.predict_proba(X)
if len(np.shape(y_prob)) == 1:
y_pred_labels = y_prob > 0.5
else:
y_pred_labels = np.argmax(y_prob, axis=1)
Result = namedtuple('res', ['y_pred', 'y_prob'])
res = Result(y_pred_labels, y_prob)
return res
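if __name__ == "__main__":
    # Usage sketch (illustrative only) of 'probs' mode: fit() and predict()
    # receive the base model's (possibly uncalibrated) probability scores.
    rng = np.random.RandomState(0)
    probs = rng.dirichlet(alpha=[2.0, 2.0], size=200)  # fake base-model scores
    labels = (probs[:, 1] + 0.2 * rng.randn(200) > 0.5).astype(int)
    calib = ClassificationCalibration(num_classes=2, fit_mode="probs", method="isotonic")
    calib.fit(probs, labels)
    res = calib.predict(probs[:5])
    print(res.y_pred, res.y_prob)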
<s> from .classification_calibration import ClassificationCalibration
<s> from collections import namedtuple
import botorch
import gpytorch
import numpy as np
import torch
from botorch.models import SingleTaskGP
from botorch.utils.transforms import normalize
from gpytorch.constraints import GreaterThan
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
from uq360.algorithms.builtinuq import BuiltinUQ
np.random.seed(42)
torch.manual_seed(42)
class HomoscedasticGPRegression(BuiltinUQ):
""" A wrapper around Botorch SingleTask Gaussian Process Regression [1]_ with homoscedastic noise.
References:
.. [1] https://botorch.org/api/models.html#singletaskgp
"""
def __init__(self,
kernel=gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel()),
likelihood=None,
config=None):
"""
Args:
kernel: gpytorch kernel function with default set to `RBFKernel` with output scale.
likelihood: gpytorch likelihood function with default set to `GaussianLikelihood`.
config: dictionary containing the config parameters for the model.
"""
        super(HomoscedasticGPRegression, self).__init__()
self.config = config
self.kernel = kernel
self.likelihood = likelihood
self.model = None
self.scaler = StandardScaler()
self.X_bounds = None
def get_params(self, deep=True):
return {"kernel": self.kernel, "likelihood": self.likelihood, "config": self.config}
def fit(self, X, y, **kwargs):
"""
Fit the GP Regression model.
Additional arguments relevant for SingleTaskGP fitting can be passed to this function.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
**kwargs: Additional arguments relevant for SingleTaskGP fitting.
Returns:
self
"""
y = self.scaler.fit_transform(y)
X, y = torch.tensor(X), torch.tensor(y)
self.X_bounds = X_bounds = torch.stack([X.min() * torch.ones(X.shape[1]),
X.max() * torch.ones(X.shape[1])])
X = normalize(X, X_bounds)
model_homo = SingleTaskGP(train_X=X, train_Y=y, covar_module=self.kernel, likelihood=self.likelihood, **kwargs)
model_homo.likelihood.noise_covar.register_constraint("raw_noise", GreaterThan(1e-5))
model_homo_marginal_log_lik = gpytorch.mlls.ExactMarginalLogLikelihood(model_homo.likelihood, model_homo)
botorch.fit.fit_gpytorch_model(model_homo_marginal_log_lik)
model_homo_marginal_log_lik.eval()
self.model = model_homo_marginal_log_lik
self.inferred_observation_noise = self.scaler.inverse_transform(self.model.likelihood.noise.detach().numpy()[0].reshape(1,1)).squeeze()
return self
def predict(self, X, return_dists=False, return_epistemic=False, return_epistemic_dists=False):
"""
Obtain predictions for the test points.
In addition to the mean and lower/upper bounds, also returns epistemic uncertainty (return_epistemic=True)
and full predictive distribution (return_dists=True).
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
return_epistemic: if True, the epistemic upper and lower bounds are returned.
return_epistemic_dists: If True, the epistemic distribution for each instance using scipy distributions
is returned.
Returns:
namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
y_lower_epistemic: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of epistemic component of the predictive distribution of the test points.
Only returned when `return_epistemic` is True.
y_upper_epistemic: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of epistemic component of the predictive distribution of the test points.
Only returned when `return_epistemic` is True.
dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
Only returned when `return_dists` is True.
"""
X = torch.tensor(X)
X_test_norm = normalize(X, self.X_bounds)
self.model.eval()
with torch.no_grad():
posterior = self.model.model.posterior(X_test_norm)
y_mean = posterior.mean
#y_epi_std = torch.sqrt(posterior.variance)
y_lower_epistemic, y_upper_epistemic = posterior.mvn.confidence_region()
predictive_posterior = self.model.model.posterior(X_test_norm, observation_noise=True)
#y_std = torch.sqrt(predictive_posterior.variance)
y_lower_total, y_upper_total = predictive_posterior.mvn.confidence_region()
y_mean, y_lower, y_upper, y_lower_epistemic, y_upper_epistemic = self.scaler.inverse_transform(y_mean.numpy()).squeeze(), \\
self.scaler.inverse_transform(y_lower_total.numpy()).squeeze(),\\
self.scaler.inverse_transform(y_upper_total.numpy()).squeeze(),\\
self.scaler.inverse_transform(y_lower_epistemic.numpy()).squeeze(),\\
self.scaler.inverse_transform(y_upper_epistemic.numpy()).squeeze()
y_epi_std = (y_upper_epistemic - y_lower_epistemic) / 4.0
y_std = (y_upper_total - y_lower_total) / 4.0
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
if return_epistemic:
Result = namedtuple('res', Result._fields + ('y_lower_epistemic', 'y_upper_epistemic',))
res = Result(*res, y_lower_epistemic=y_lower_epistemic, y_upper_epistemic=y_upper_epistemic)
if return_dists:
dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_dists',))
res = Result(*res, y_dists=dists)
if return_epistemic_dists:
epi_dists = [norm(loc=y_mean[i], scale=y_epi_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_epistemic_dists',))
res = Result(*res, y_epistemic_dists=epi_dists)
return res
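if __name__ == "__main__":
    # Usage sketch (illustrative only). fit() standardizes y with a
    # StandardScaler, so y must be 2-D of shape (n_samples, 1).
    rng = np.random.RandomState(0)
    X = rng.rand(40, 2)
    y = (X.sum(axis=1) + 0.05 * rng.randn(40)).reshape(-1, 1)
    gp = HomoscedasticGPRegression()
    gp.fit(X, y)
    res = gp.predict(X[:5], return_epistemic=True)
    print(res.y_mean, res.y_lower_epistemic, res.y_upper_epistemic)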
<s> from .homoscedastic_gaussian_process_regression import HomoscedasticGPRegression<s> from collections import namedtuple
from sklearn.ensemble import GradientBoostingRegressor
from uq360.algorithms.builtinuq import BuiltinUQ
class QuantileRegression(BuiltinUQ):
"""Quantile Regression uses quantile loss and learns two separate models for the upper and lower quantile
to obtain the prediction intervals.
"""
def __init__(self, model_type="gbr | ||
", config=None):
"""
Args:
model_type: The base model used for predicting a quantile. Currently supported values are [gbr].
gbr is sklearn GradientBoostingRegressor.
config: dictionary containing the config parameters for the model.
"""
        super(QuantileRegression, self).__init__()
if config is not None:
self.config = config
else:
self.config = {}
if "alpha" not in self.config:
self.config["alpha"] = 0.95
if model_type == "gbr":
self.model_type = model_type
self.model_mean = GradientBoostingRegressor(
loss='ls',
n_estimators=self.config["n_estimators"],
max_depth=self.config["max_depth"],
learning_rate=self.config["learning_rate"],
min_samples_leaf=self.config["min_samples_leaf"],
min_samples_split=self.config["min_samples_split"]
)
self.model_upper = GradientBoostingRegressor(
loss='quantile',
alpha=self.config["alpha"],
n_estimators=self.config["n_estimators"],
max_depth=self.config["max_depth"],
learning_rate=self.config["learning_rate"],
min_samples_leaf=self.config["min_samples_leaf"],
min_samples_split=self.config["min_samples_split"]
)
self.model_lower = GradientBoostingRegressor(
loss='quantile',
alpha=1.0 - self.config["alpha"],
n_estimators=self.config["n_estimators"],
max_depth=self.config["max_depth"],
learning_rate=self.config["learning_rate"],
min_samples_leaf=self.config["min_samples_leaf"],
min_samples_split=self.config["min_samples_split"])
else:
raise NotImplementedError
def get_params(self, deep=True):
return {"model_type": self.model_type, "config": self.config}
def fit(self, X, y):
""" Fit the Quantile Regression model.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
self.model_mean.fit(X, y)
self.model_lower.fit(X, y)
self.model_upper.fit(X, y)
return self
def predict(self, X):
"""
Obtain predictions for the test points.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
Returns:
            namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
y_mean = self.model_mean.predict(X)
y_lower = self.model_lower.predict(X)
y_upper = self.model_upper.predict(X)
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
return res
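if __name__ == "__main__":
    # Usage sketch (illustrative only). Apart from 'alpha' (defaulted to
    # 0.95 above), the GradientBoostingRegressor hyperparameters below are
    # required config keys; loss='ls' assumes an older scikit-learn.
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(200, 3)
    y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.randn(200)
    cfg = {"n_estimators": 100, "max_depth": 3, "learning_rate": 0.1,
           "min_samples_leaf": 5, "min_samples_split": 10}
    qr = QuantileRegression(model_type="gbr", config=cfg).fit(X, y)
    res = qr.predict(X[:5])
    print(res.y_mean, res.y_lower, res.y_upper)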
<s> from .quantile_regression import QuantileRegression
<s> import inspect
from collections import namedtuple
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import train_test_split
from sklearn.exceptions import NotFittedError
from uq360.algorithms.posthocuq import PostHocUQ
class BlackboxMetamodelRegression(PostHocUQ):
""" Extracts confidence scores from black-box regression models using a meta-model [2]_ .
References:
.. [2] Chen, Tongfei, et al. Confidence scoring using whitebox meta-models with linear classifier probes.
The 22nd International Conference on Artificial Intelligence and Statistics. PMLR, 2019.
"""
def _create_named_model(self, mdltype, config):
"""
Instantiates a model by name passed in 'mdltype'
        :param mdltype: string with name (must be supported)
:param config: dict with args passed in the instantiation call
:return: mdl instance
"""
assert (isinstance(mdltype, str))
if mdltype == 'gbr':
mdl = GradientBoostingRegressor(**config)
else:
raise NotImplementedError("ERROR: Requested model type unknown: \\"%s\\"" % mdltype)
return mdl
def _get_model_instance(self, model, config):
"""
Returns an instance of a model based on (a) a desired name or (b) passed in class, or
(c) passed in instance
:param model: string, class, or instance. Class and instance must have certain methods callable.
:param config: dict with args passed in during the instantiation
:return: model instance
"""
assert (model is not None and config is not None)
if isinstance(model, str): # 'model' is a name, create it
mdl = self._create_named_model(model, config)
elif inspect.isclass(model): # 'model' is a class, instantiate it
mdl = model(**config)
else: # 'model' is an instance, register it
mdl = model
if not all([hasattr(mdl, key) and callable(getattr(mdl, key)) for key in self.callable_keys]):
raise ValueError("ERROR: Passed model/method failed the interface test. Methods required: %s" %
','.join(self.callable_keys))
return mdl
def __init__(self, base_model=None, meta_model=None, base_config=None, meta_config=None, random_seed=42):
"""
:param base_model: Base model. Can be:
(1) None (default mdl will be set up),
(2) Named model (e.g., 'gbr'),
(3) Base model class declaration (e.g., sklearn.linear_model.LinearRegressor). Will instantiate.
(4) Model instance (instantiated outside). Will be re-used. Must have required callable methods.
Note: user-supplied classes and models must have certain callable methods ('predict', 'fit')
and be capable of raising NotFittedError.
:param meta_model: Meta model. Same values possible as with 'base_model'
:param base_config: None or a params dict to be passed to 'base_model' at instantiation
:param meta_config: None or a params dict to be passed to 'meta_model' at instantiation
:param random_seed: seed used in the various pipeline steps
"""
        super(BlackboxMetamodelRegression, self).__init__()
self.random_seed = random_seed
self.callable_keys = ['predict', 'fit'] # required methods - must be present in models passed in
self.base_model_default = 'gbr'
self.meta_model_default = 'gbr'
self.base_config_default = {'loss': 'ls', 'n_estimators': 300, 'max_depth': 10, 'learning_rate': 0.001,
'min_samples_leaf': 10, 'min_samples_split': 10, 'random_state': self.random_seed}
self.meta_config_default = {'loss': 'quantile', 'alpha': 0.95, 'n_estimators': 300, 'max_depth': 10,
'learning_rate': 0.001, 'min_samples_leaf': 10, 'min_samples_split': 10,
'random_state': self.random_seed}
self.base_config = base_config if base_config is not None else self.base_config_default
self.meta_config = meta_config if meta_config is not None else self.meta_config_default
self.base_model = None
self.meta_model = None
self.base_model = self._get_model_instance(base_model if base_model is not None else self.base_model_default,
self.base_config)
self.meta_model = self._get_model_instance(meta_model if meta_model is not None else self.meta_model_default,
self.meta_config)
def get_params(self, deep=True):
return {"base_model": self.base_model, "meta_model": self.meta_model, "base_config": self.base_config,
"meta_config": self.meta_config, "random_seed": self.random_seed}
def fit(self, X, y, meta_fraction=0.2, randomize_samples=True, base_is_prefitted=False,
meta_train_data=(None, None)):
"""
Fit base and meta models.
:param X: input to the base model
:param y: ground truth for the base model
:param meta_fraction: float in [0,1] - a fractional size of the partition carved out to train the meta model
(complement will be used to train the base model)
:param randomize_samples: use shuffling when creating partitions
        :param base_is_prefitted: Setting True will skip fitting the base model (useful for base models that have been
               instantiated outside/by the user and are already fitted).
:param meta_train_data: User supplied data to train the meta model. Note that this option should only be used
with 'base_is_prefitted'==True. Pass a tuple meta_train_data=(X_meta, y_meta) to activate.
Note that (X,y,meta_fraction, randomize_samples) will be ignored in this mode.
:return: self
"""
X = np.asarray(X)
y = np.asarray(y)
assert(len(meta_train_data)==2)
if meta_train_data[0] is None:
X_base, X_meta, y_base, y_meta = train_test_split(X, y, shuffle=randomize_samples, test_size=meta_fraction,
random_state=self.random_seed)
else:
if not base_is_prefitted:
raise ValueError("ERROR: fit(): base model must be pre-fitted to use the 'meta_train_data' option")
X_base = y_base = None
X_meta = meta_train_data[0]
y_meta = meta_train_data[1]
# fit the base model
if not base_is_prefitted:
self.base_model.fit(X_base, y_base)
# get input for the meta model from the base
try:
y_hat_meta = self.base_model.predict(X_meta)
except NotFittedError as e:
raise RuntimeError("ERROR: fit(): The base model appears not pre-fitted (%s)" % repr(e))
# used base input and output as meta input
X_meta_in = self._process_pretrained_model(X_meta, y_hat_meta)
# train meta model to predict abs diff
self.meta_model.fit(X_meta_in, np.abs(y_hat_meta - y_meta))
return self
def _process_pretrained_model(self, X, y_hat):
"""
Given the original input features and the base output probabilities, generate input features
to train a meta model. Current implementation copies all input features and appends.
:param X: numpy [nsamples, dim]
:param y_hat: [nsamples,]
:return: array with new features [nsamples, newdim]
"""
y_hat_meta_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat
X_meta_in = np.hstack([X, y_hat_meta_prime])
return X_meta_in
def predict(self, X):
"""
Generate prediction and uncertainty bounds for data X.
:param X: input features
:return: namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
y_hat = self.base_model.predict(X)
y_hat_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat
X_meta_in = np.hstack([X, y_hat_prime])
z_hat = self.meta_model.predict(X_meta_in)
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_hat, y_hat - z_hat, y_hat + z_hat)
return res
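if __name__ == "__main__":
    # Usage sketch (illustrative only): default 'gbr' base and meta models
    # (loss='ls' in the defaults assumes an older scikit-learn). The meta
    # model learns |y - y_hat|, which widens/narrows the returned band.
    rng = np.random.RandomState(0)
    X = rng.rand(300, 4)
    y = X.sum(axis=1) + 0.1 * rng.randn(300)
    mm = BlackboxMetamodelRegression().fit(X, y, meta_fraction=0.2)
    res = mm.predict(X[:5])
    print(res.y_mean, res.y_lower, res.y_upper)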
<s> import inspect
from collections import namedtuple
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.exceptions import NotFittedError
from uq360.algorithms.posthocuq import PostHocUQ
class BlackboxMetamodelClassification(PostHocUQ):
""" Extracts confidence scores from black-box classification models using a meta-model [4]_ .
References:
.. [4] Chen, Tongfei, et al. "Confidence scoring using whitebox meta-models with linear classifier probes."
The 22nd International Conference on Artificial Intelligence and Statistics. PMLR, 2019.
"""
def _create_named_model(self, mdltype, config):
""" Instantiates a model by name passed in 'mdltype'.
Args:
mdltype: string with name (must be supported)
config: dict with args passed in the instantiation call
Returns:
mdl instance
"""
assert (isinstance(mdltype, str))
if mdltype == 'lr':
mdl = LogisticRegression(**config)
elif mdltype == 'gbm':
mdl = GradientBoostingClassifier(**config)
else:
raise NotImplementedError("ERROR: Requested model type unknown: \\"%s\\"" % mdltype)
return mdl
def _get_model_instance(self, model, config):
""" Returns an instance of a model based on (a) a desired name or (b) passed in class, or
(c) passed in instance.
:param model: string, class, or instance. Class and instance must have certain methods callable.
: | ||
param config: dict with args passed in during the instantiation
:return: model instance
"""
assert (model is not None and config is not None)
if isinstance(model, str): # 'model' is a name, create it
mdl = self._create_named_model(model, config)
elif inspect.isclass(model): # 'model' is a class, instantiate it
mdl = model(**config)
else: # 'model' is an instance, register it
mdl = model
if not all([hasattr(mdl, key) and callable(getattr(mdl, key)) for key in self.callable_keys]):
raise ValueError("ERROR: Passed model/method failed the interface test. Methods required: %s" %
','.join(self.callable_keys))
return mdl
def __init__(self, base_model=None, meta_model=None, base_config=None, meta_config=None, random_seed=42):
"""
:param base_model: Base model. Can be:
(1) None (default mdl will be set up),
(2) Named model (e.g., logistic regression 'lr' or gradient boosting machine 'gbm'),
(3) Base model class declaration (e.g., sklearn.linear_model.LogisticRegression). Will instantiate.
(4) Model instance (instantiated outside). Will be re-used. Must have certain callable methods.
Note: user-supplied classes and models must have certain callable methods ('predict', 'fit')
and be capable of raising NotFittedError.
:param meta_model: Meta model. Same values possible as with 'base_model'
:param base_config: None or a params dict to be passed to 'base_model' at instantiation
:param meta_config: None or a params dict to be passed to 'meta_model' at instantiation
:param random_seed: seed used in the various pipeline steps
"""
        super(BlackboxMetamodelClassification, self).__init__()
self.random_seed = random_seed
self.callable_keys = ['predict', 'fit'] # required methods - must be present in models passed in
self.base_model_default = 'gbm'
self.meta_model_default = 'lr'
self.base_config_default = {'n_estimators': 300, 'max_depth': 10,
'learning_rate': 0.001, 'min_samples_leaf': 10, 'min_samples_split': 10,
'random_state': self.random_seed}
self.meta_config_default = {'penalty': 'l1', 'C': 1, 'solver': 'liblinear', 'random_state': self.random_seed}
self.base_config = base_config if base_config is not None else self.base_config_default
self.meta_config = meta_config if meta_config is not None else self.meta_config_default
self.base_model = None
self.meta_model = None
self.base_model = self._get_model_instance(base_model if base_model is not None else self.base_model_default,
self.base_config)
self.meta_model = self._get_model_instance(meta_model if meta_model is not None else self.meta_model_default,
self.meta_config)
def get_params(self, deep=True):
return {"base_model": self.base_model, "meta_model": self.meta_model, "base_config": self.base_config,
"meta_config": self.meta_config, "random_seed": self.random_seed}
def _process_pretrained_model(self, X, y_hat_proba):
"""
Given the original input features and the base output probabilities, generate input features
to train a meta model. Current implementation copies all input features and appends.
:param X: numpy [nsamples, dim]
:param y_hat_proba: [nsamples, nclasses]
:return: array with new features [nsamples, newdim]
"""
assert (len(y_hat_proba.shape) == 2)
assert (X.shape[0] == y_hat_proba.shape[0])
# sort the probs sample by sample
faux1 = np.sort(y_hat_proba, axis=-1)
# add delta between top and second candidate
faux2 = np.expand_dims(faux1[:, -1] - faux1[:, -2], axis=-1)
return np.hstack([X, faux1, faux2])
def fit(self, X, y, meta_fraction=0.2, randomize_samples=True, base_is_prefitted=False,
meta_train_data=(None, None)):
"""
Fit base and meta models.
:param X: input to the base model,
array-like of shape (n_samples, n_features).
Features vectors of the training data.
:param y: ground truth for the base model,
array-like of shape (n_samples,)
:param meta_fraction: float in [0,1] - a fractional size of the partition carved out to train the meta model
(complement will be used to train the base model)
:param randomize_samples: use shuffling when creating partitions
        :param base_is_prefitted: Setting True will skip fitting the base model (useful for base models that have been
               instantiated outside/by the user and are already fitted).
:param meta_train_data: User supplied data to train the meta model. Note that this option should only be used
with 'base_is_prefitted'==True. Pass a tuple meta_train_data=(X_meta, y_meta) to activate.
Note that (X,y,meta_fraction, randomize_samples) will be ignored in this mode.
:return: self
"""
X = np.asarray(X)
y = np.asarray(y)
assert (len(meta_train_data) == 2)
if meta_train_data[0] is None:
X_base, X_meta, y_base, y_meta = train_test_split(X, y, shuffle=randomize_samples, test_size=meta_fraction,
random_state=self.random_seed)
else:
if not base_is_prefitted:
raise ValueError("ERROR: fit(): base model must be pre-fitted to use the 'meta_train_data' option")
X_base = y_base = None
X_meta = meta_train_data[0]
y_meta = meta_train_data[1]
# fit the base model
if not base_is_prefitted:
self.base_model.fit(X_base, y_base)
# get input for the meta model from the base
try:
y_hat_meta_proba = self.base_model.predict_proba(X_meta)
# determine correct-incorrect outcome - these are targets for the meta model trainer
# y_hat_meta_targets = np.asarray((y_meta == np.argmax(y_hat_meta_proba, axis=-1)), dtype=np.int) -- Fix for python 3.8.11 update (in 2.9.0.8)
y_hat_meta_targets = np.asarray((y_meta == np.argmax(y_hat_meta_proba, axis=-1)), dtype=int)
except NotFittedError as e:
raise RuntimeError("ERROR: fit(): The base model appears not pre-fitted (%s)" % repr(e))
# get input features for meta training
X_meta_in = self._process_pretrained_model(X_meta, y_hat_meta_proba)
# train meta model to predict 'correct' vs. 'incorrect' of the base
self.meta_model.fit(X_meta_in, y_hat_meta_targets)
return self
def predict(self, X):
"""
Generate a base prediction along with uncertainty/confidence for data X.
:param X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
:return: namedtuple: A namedtuple that holds
y_pred: ndarray of shape (n_samples,)
Predicted labels of the test points.
y_score: ndarray of shape (n_samples,)
                Confidence score of the test points.
"""
y_hat_proba = self.base_model.predict_proba(X)
y_hat = np.argmax(y_hat_proba, axis=-1)
X_meta_in = self._process_pretrained_model(X, y_hat_proba)
z_hat = self.meta_model.predict_proba(X_meta_in)
        index_of_class_1 = np.where(self.meta_model.classes_ == 1)[0][0]  # class 1 corresponds to the probability of a positive/correct outcome
Result = namedtuple('res', ['y_pred', 'y_score'])
res = Result(y_hat, z_hat[:, index_of_class_1])
return res
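if __name__ == "__main__":
    # Usage sketch (illustrative only) of the pre-fitted path: with
    # base_is_prefitted=True the base model is used as-is and the meta model
    # is trained on the user-supplied (X_meta, y_meta) split.
    from sklearn.ensemble import RandomForestClassifier
    rng = np.random.RandomState(0)
    X = rng.rand(400, 5)
    y = (X[:, 0] + X[:, 1] + 0.3 * rng.randn(400) > 1.0).astype(int)
    base = RandomForestClassifier(n_estimators=50, random_state=0).fit(X[:300], y[:300])
    mm = BlackboxMetamodelClassification(base_model=base)
    mm.fit(X, y, base_is_prefitted=True, meta_train_data=(X[300:], y[300:]))
    res = mm.predict(X[:5])
    print(res.y_pred, res.y_score)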
<s> from .blackbox_metamodel_regression import BlackboxMetamodelRegression
from .blackbox_metamodel_classification import BlackboxMetamodelClassification
<s> from collections import namedtuple
from uq360.algorithms.posthocuq import PostHocUQ
from uq360.utils.misc import form_D_for_auucc
from uq360.metrics.uncertainty_characteristics_curve.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve
class UCCRecalibration(PostHocUQ):
""" Recalibration a regression model to specified operating point using Uncertainty Characteristics Curve.
"""
def __init__(self, base_model):
"""
Args:
base_model: pretrained model to be recalibrated.
"""
        super(UCCRecalibration, self).__init__()
self.base_model = self._process_pretrained_model(base_model)
self.ucc = None
def get_params(self, deep=True):
return {"base_model": self.base_model}
def _process_pretrained_model(self, base_model):
return base_model
def fit(self, X, y):
"""
Fit the Uncertainty Characteristics Curve.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
y_pred_mean, y_pred_lower, y_pred_upper = self.base_model.predict(X)[:3]
bwu = y_pred_upper - y_pred_mean
bwl = y_pred_mean - y_pred_lower
self.ucc = UncertaintyCharacteristicsCurve()
self.ucc.fit(form_D_for_auucc(y_pred_mean, bwl, bwu), y.squeeze())
return self
def predict(self, X, missrate=0.05):
"""
Generate prediction and uncertainty bounds for data X.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
missrate: desired missrate of the new operating point, set to 0.05 by default.
Returns:
            namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
C = self.ucc.get_specific_operating_point(req_y_axis_value=missrate, vary_bias=False)
new_scale = C['modvalue']
y_pred_mean, y_pred_lower, y_pred_upper = self.base_model.predict(X)[:3]
bwu = y_pred_upper - y_pred_mean
bwl = y_pred_mean - y_pred_lower
        if C['operation'] == 'bias':
            calib_y_pred_upper = y_pred_mean + (new_scale + bwu)  # recalibrated upper bound
            calib_y_pred_lower = y_pred_mean - (new_scale + bwl)  # recalibrated lower bound
        else:
            calib_y_pred_upper = y_pred_mean + (new_scale * bwu)  # recalibrated upper bound
            calib_y_pred_lower = y_pred_mean - (new_scale * bwl)  # recalibrated lower bound
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_pred_mean, calib_y_pred_lower, calib_y_pred_upper)
return res
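if __name__ == "__main__":
    # Usage sketch (illustrative only): any base model whose predict()
    # returns (mean, lower, upper) can be recalibrated to a target missrate.
    # _FixedBandModel is hypothetical, purely for the demo.
    import numpy as np
    class _FixedBandModel:
        def predict(self, X):
            m = np.asarray(X).sum(axis=1)
            return m, m - 0.2, m + 0.2  # constant +/- 0.2 band
    rng = np.random.RandomState(0)
    X = rng.rand(200, 2)
    y = X.sum(axis=1) + 0.3 * rng.randn(200)
    rc = UCCRecalibration(base_model=_FixedBandModel()).fit(X, y)
    res = rc.predict(X[:5], missrate=0.05)
    print(res.y_mean, res.y_lower, res.y_upper)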
<s> from .ucc_recalibration import UCCRecalibration
<s> from collections import namedtuple
import numpy as np
from uq360.algorithms.posthocuq import PostHocUQ
class InfinitesimalJackknife(PostHocUQ):
"""
Performs a first order Taylor series expansion around MLE / MAP fit.
Requires the model being probed to be twice differentiable.
"""
def __init__(self, params, gradients, hessian, config):
""" Initialize IJ.
Args:
params: MLE / MAP fit around which uncertainty is sought. d*1
gradients: Per data point gradients, estimated at the MLE / MAP fit. d*n
hessian: Hessian evaluated at the MLE / MAP fit. d*d
"""
        super(InfinitesimalJackknife, self).__init__()
self.params_one = params
self.gradients = gradients
self.hessian = hessian
self.d, self.n = gradients.shape
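        # Infinitesimal-jackknife linearization: the sensitivity of the fitted
        # parameters to per-sample weights is dtheta/dw = -H^{-1} @ gradients
        # (a first-order Taylor expansion around the MLE/MAP fit).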
self.dParams_dWeights = -np.linalg.solve(self.hessian, self.gradients)
self.approx_dParams_dWeights = -np.linalg.solve(np.diag(np.diag(self.hessian)), self.gradients)
self.w_one = np.ones([self.n])
self.config = config
def get_params(self, deep=True):
return {"params": self.params, "config": self.config, "gradients": self.gradients,
"hessian": self.hessian}
def _process_pretrained_model(self, *argv, **kwargs):
pass
def get_parameter_uncertainty(self):
if (self.config['resampling_strategy'] == "jackknife") or (self.config['resampling_strategy'] == "jackknife+"):
w_query = np.ones_like(self.w_one)
resampled_params = np.zeros([self.n, self.d])
for i in np.arange(self.n):
w_query[i] = 0
resampled_params[i] = self.ij(w_query)
w_query[i] = 1
return np.cov(resampled_params), resampled_params
elif self.config['resampling_strategy'] == "bootstrap":
            pass  # bootstrap resampling is not implemented yet
else:
            raise NotImplementedError("Only jackknife, jackknife+, and bootstrap resampling strategies are supported")
def predict(self, X, model):
"""
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
model: model object, must implement a set_parameters function
Returns:
            namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
n, _ = X.shape
y_all = model.predict(X)
_, d_out = y_all.shape
params_cov, params = self.get_parameter_uncertainty()
if d_out > 1:
print("Quantiles are computed independently for each dimension. May not be accurate.")
y = np.zeros([params.shape[0], n, d_out])
for i in np.arange(params.shape[0]):
model.set_parameters(params[i])
y[i] = model.predict(X)
y_lower = np.quantile(y, q=0.5 * self.config['alpha'], axis=0)
y_upper = np.quantile(y, q=(1. - 0.5 * self.config['alpha']), axis=0)
y_mean = y.mean(axis=0)
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
return res
def ij(self, w_query):
"""
Args:
w_query: A n*1 vector to query parameters at.
Return:
new parameters at w_query
"""
assert w_query.shape[0] == self.n
return self.params_one + self.dParams_dWeights @ (w_query-self.w_one).T
def approx_ij(self, w_query):
"""
Args:
w_query: A n*1 vector to query parameters at.
Return:
new parameters at w_query
"""
assert w_query.shape[0] == self.n
return self.params_one + self.approx_dParams_dWeights @ (w_query-self.w_one).T<s> from .infinitesimal_jackknife import InfinitesimalJackknife
<s> import copy
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.utils.data as data_utils
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
from uq360.algorithms.builtinuq import BuiltinUQ
from uq360.models.bayesian_neural_networks.bnn_models import horseshoe_mlp, bayesian_mlp
class BnnRegression(BuiltinUQ):
"""
Variationally trained BNNs with Gaussian and Horseshoe [6]_ priors for regression.
References:
.. [6] Ghosh, Soumya, Jiayu Yao, and Finale Doshi-Velez. "Structured variational learning of Bayesian neural
networks with horseshoe priors." International Conference on Machine Learning. PMLR, 2018.
"""
def __init__(self, config, prior="Gaussian"):
"""
Args:
config: a dictionary specifying network and learning hyperparameters.
prior: BNN priors specified as a string. Supported priors are Gaussian, Hshoe, RegHshoe
"""
super(BnnRegression, self).__init__()
        self.config = config
        self.prior = prior  # stored for get_params()
if prior == "Gaussian":
self.net = bayesian_mlp.BayesianRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'])
self.config['use_reg_hshoe'] = None
elif prior == "Hshoe":
self.net = horseshoe_mlp.HshoeRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'],
hshoe_scale=config['hshoe_scale'])
self.config['use_reg_hshoe'] = False
elif prior == "RegHshoe":
self.net = horseshoe_mlp.HshoeRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'],
hshoe_scale=config['hshoe_scale'],
use_reg_hshoe=config['use_reg_hshoe'])
self.config['use_reg_hshoe'] = True
else:
raise NotImplementedError("'prior' must be a string. It can be one of Gaussian, Hshoe, RegHshoe")
def get_params(self, deep=True):
return {"prior": self.prior, "config": self.config}
def fit(self, X, y):
""" Fit the BNN regression model.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
torch.manual_seed(1234)
optimizer = torch.optim.Adam(self.net.parameters(), lr=self.config['step_size'])
neg_elbo = torch.zeros([self.config['num_epochs'], 1])
params_store = {}
for epoch in range(self.config['num_epochs']):
loss = self.net.neg_elbo(num_batches=1, x=X, y=y.float().unsqueeze(dim=1)) / X.shape[0]
optimizer.zero_grad()
loss.backward()
optimizer.step()
if hasattr(self.net, 'fixed_point_updates'):
# for hshoe or regularized hshoe nets
self.net.fixed_point_updates()
neg_elbo[epoch] = loss.item()
if (epoch + 1) % 10 == 0:
# print ((net.noise_layer.bhat/net.noise_layer.ahat).data.numpy()[0])
print('Epoch[{}/{}], neg elbo: {:.6f}, noise var: {:.6f}'
.format(epoch + 1, self.config['num_epochs'], neg_elbo[epoch].item() / X.shape[0],
self.net.get_noise_var()))
params_store[epoch] = copy.deepcopy(self.net.state_dict()) # for small nets we can just store all.
best_model_id = neg_elbo.argmin() # loss_val_store.argmin() #
self.net.load_state_dict(params_store[best_model_id.item()])
return self
def predict(self, X, mc_samples=100, return_dists=False, return_epistemic=True, return_epistemic_dists=False):
"""
Obtain predictions for the test points.
In addition to the mean and lower/upper bounds, also returns epistemic uncertainty (return_epistemic=True)
and full predictive distribution (return_dists=True).
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
mc_samples: Number of Monte-Carlo samples.
return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
return_epistemic: if True, the epistemic upper and lower bounds are returned.
return_epistemic_dists: If True, the epistemic distribution for each instance using scipy distributions
is returned.
Returns:
            namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
y_lower_epistemic: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of epistemic component of the predictive distribution of the test points.
Only returned when `return_epistemic` is True.
y_upper_epistemic: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of epistemic component of the predictive distribution of the test points.
Only returned when `return_epistemic` is True.
dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
Only returned when `return_dists` is True.
"""
epistemic_out = np.zeros([mc_samples, X.shape[0]])
total_out = np.zeros([mc_samples, X.shape[0]])
for s in np.arange(mc_samples):
pred = self.net(X).data.numpy().ravel()
epistemic_out[s] = pred
total_out[s] = pred + np.sqrt(self.net.get_noise_var()) * np.random.randn(pred.shape[0])
y_total_std = np.std(total_out, axis=0)
y_epi_std = np.std(epistemic_out, axis=0)
y_mean = np.mean(total_out, axis=0)
y_lower = y_mean - 2 * y_total_std
y_upper = y_mean + 2 * y_total_std
y_epi_lower = y_mean - 2 * y_epi_std
y_epi_upper = y_mean + 2 * y_epi_std
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
if return_epistemic:
            Result = namedtuple('res', Result._fields + ('y_lower_epistemic', 'y_upper_epistemic',))
            res = Result(*res, y_lower_epistemic=y_epi_lower, y_upper_epistemic=y_epi_upper)
if return_dists:
dists = [norm(loc=y_mean[i], scale=y_total_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_dists',))
res = Result(*res, y_dists=dists)
if return_epistemic_dists:
epi_dists = [norm(loc=y_mean[i], scale=y_epi_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_epistemic_dists',))
res = Result(*res, y_epistemic_dists=epi_dists)
return res
class BnnClassification(BuiltinUQ):
"""
Variationally trained BNNs with Gaussian and Horseshoe [6]_ priors for classification.
"""
def __init__(self, config, prior="Gaussian", device=None):
"""
Args:
config: a dictionary specifying network and learning hyperparameters.
prior: BNN priors specified as a string. Supported priors are Gaussian, Hshoe, RegHshoe
"""
super(BnnClassification, self).__init__()
        self.config = config
        self.device = device
        self.prior = prior  # stored for get_params()
if prior == "Gaussian":
self.net = bayesian_mlp.BayesianClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'])
self.config['use_reg_hshoe'] = None
elif prior == "Hshoe":
self.net = horseshoe_mlp.HshoeClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'],
hshoe_scale=config['hshoe_scale'])
self.config['use_reg_hshoe'] = False
elif prior == "RegHshoe":
self.net = horseshoe_mlp.HshoeClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'],
hshoe_scale=config['hshoe_scale'],
use_reg_hshoe=config['use_reg_hshoe'])
self.config['use_reg_hshoe'] = True
else:
raise NotImplementedError("'prior' must be a string. It can be one of Gaussian, Hshoe, RegHshoe")
if "batch_size" not in self.config:
self.config["batch_size"] = 50
self.net = self.net.to(device)
def get_params(self, deep=True):
return {"prior": self.prior, "config": self.config, "device": self.device}
def fit(self, X=None, y=None, train_loader=None):
""" Fits BNN regression model.
Args:
            X: array-like of shape (n_samples, n_features).
                Features vectors of the training data.
Ignored if train_loader is not None.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Ignored if train_loader is not None.
train_loader: pytorch train_loader object.
Returns:
self
"""
if train_loader is None:
            train = data_utils.TensorDataset(torch.Tensor(X), torch.Tensor(np.asarray(y)).long())
train_loader = data_utils.DataLoader(train, batch_size=self.config['batch_size'], shuffle=True)
torch.manual_seed(1234)
optimizer = torch.optim.Adam(self.net.parameters(), lr=self.config['step_size'])
neg_elbo = torch.zeros([self.config['num_epochs'], 1])
params_store = {}
for epoch in range(self.config['num_epochs']):
avg_loss = 0.0
for batch_x, batch_y in train_loader:
                loss = self.net.neg_elbo(num_batches=len(train_loader), x=batch_x, y=batch_y) / batch_x.size(0)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if hasattr(self.net, 'fixed_point_updates'):
# for hshoe or regularized hshoe nets
self.net.fixed_point_updates()
avg_loss += loss.item()
neg_elbo[epoch] = avg_loss / len(train_loader)
if (epoch + 1) % 10 == 0:
# print ((net.noise_layer.bhat/net.noise_layer.ahat).data.numpy()[0])
print('Epoch[{}/{}], neg elbo: {:.6f}'
.format(epoch + 1, self.config['num_epochs'], neg_elbo[epoch].item()))
params_store[epoch] = copy.deepcopy(self.net.state_dict()) # for small nets we can just store all.
best_model_id = neg_elbo.argmin() # loss_val_store.argmin() #
self.net.load_state_dict(params_store[best_model_id.item()])
return self
def predict(self, X, mc_samples=100):
"""
Obtain calibrated predictions for the test points.
Args:
            X: array-like of shape (n_samples, n_features).
                Features vectors of the test points.
mc_samples: Number of Monte-Carlo samples.
Returns:
            namedtuple: A namedtuple that holds
y_pred: ndarray of shape (n_samples,)
Predicted labels of the test points.
y_prob: ndarray of shape (n_samples, n_classes)
Predicted probability scores of the classes.
y_prob_var: ndarray of shape (n_samples,)
Variance of the prediction on the test points.
y_prob_samples: ndarray of shape (mc_samples, n_samples, n_classes)
Samples from the predictive distribution.
"""
X = torch.Tensor(X)
y_prob_samples = [F.softmax(self.net(X), dim=1).detach().numpy() for _ in np.arange(mc_samples)]
y_prob_samples_stacked = np.stack(y_prob_samples)
prob_mean = np.mean(y_prob_samples_stacked, 0)
prob_var = np.std(y_prob_samples_stacked, 0) ** 2
if len(np.shape(prob_mean)) == 1:
y_pred_labels = prob_mean > 0.5
else:
y_pred_labels = np.argmax(prob_mean, axis=1)
Result = namedtuple('res', ['y_pred', 'y_prob', 'y_prob_var', 'y_prob_samples'])
res = Result(y_pred_labels, prob_mean, prob_var, y_prob_samples)
return res
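if __name__ == "__main__":
    # Usage sketch (illustrative only): a small Gaussian-prior BNN classifier
    # trained via a user-built DataLoader. Config keys are the ones read by
    # this class; values are arbitrary. Assumes the bundled bayesian_mlp nets.
    rng = np.random.RandomState(0)
    X = rng.rand(200, 4).astype('float32')
    y = (X[:, 0] > 0.5).astype(int)
    train = data_utils.TensorDataset(torch.Tensor(X), torch.LongTensor(y))
    loader = data_utils.DataLoader(train, batch_size=32, shuffle=True)
    cfg = {"ip_dim": 4, "op_dim": 2, "num_nodes": 16, "num_layers": 1,
           "step_size": 1e-2, "num_epochs": 5}
    bnn = BnnClassification(config=cfg, prior="Gaussian").fit(train_loader=loader)
    res = bnn.predict(X[:5], mc_samples=20)
    print(res.y_pred, res.y_prob.shape, res.y_prob_var.shape)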
<s><s> from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from scipy.stats import norm
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from uq360.algorithms.builtinuq import BuiltinUQ
np.random.seed(42)
torch.manual_seed(42)
class _MLPNet_Main(torch.nn.Module):
def __init__(self, num_features, num_outputs, num_hidden):
super(_MLPNet_Main, self).__init__()
self.fc = torch.nn.Linear(num_features, num_hidden)
self.fc_mu = torch.nn.Linear(num_hidden, num_outputs)
self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs)
def forward(self, x):
x = F.relu(self.fc(x))
mu = self.fc_mu(x)
log_var = self.fc_log_var(x)
return mu, log_var
class _MLPNet_Aux(torch.nn.Module):
def __init__(self, num_features, num_outputs, num_hidden):
super(_MLPNet_Aux, self).__init__()
self.fc = torch.nn.Linear(num_features, num_hidden)
self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs)
def forward(self, x):
x = F.relu(self.fc(x))
log_var = self.fc_log_var(x)
return log_var
class AuxiliaryIntervalPredictor(BuiltinUQ):
""" Auxiliary Interval Predictor [1]_ uses an auxiliary model to encourage calibration of the main model.
References:
.. [1] Thiagarajan, J. J., Venkatesh, B., Sattigeri, P., & Bremer, P. T. (2020, April). Building calibrated deep
models via uncertainty matching with auxiliary interval predictors. In Proceedings of the AAAI Conference on
Artificial Intelligence (Vol. 34, No. 04, pp. 6005-6012). https://arxiv.org/abs/1909.04079
"""
def __init__(self, model_type=None, main_model=None, aux_model=None, config=None, device=None, verbose=True):
"""
Args:
            model_type: The model type used to build the main model and the auxiliary model. Currently supported values
                are [mlp, custom]. `mlp` model type learns an MLP neural network using the pytorch framework. For `custom` the user
                provides `main_model` and `aux_model`.
main_model: (optional) The main prediction model. Currently support pytorch models that return mean and log variance.
aux_model: (optional) The auxiliary prediction model. Currently support pytorch models that return calibrated log variance.
config: dictionary containing the config parameters for the model.
device: device used for pytorch models ignored otherwise.
verbose: if True, print statements with the progress are enabled.
"""
        super(AuxiliaryIntervalPredictor, self).__init__()
self.config = config
self.device = device
self.verbose = verbose
if model_type == "mlp":
self.model_type = model_type
self.main_model = _MLPNet_Main(
num_features=self.config["num_features"],
num_outputs=self.config["num_outputs"],
num_hidden=self.config["num_hidden"],
)
self.aux_model = _MLPNet_Aux(
num_features=self.config["num_features"],
num_outputs=self.config["num_outputs"],
num_hidden=self.config["num_hidden"],
)
elif model_type == "custom":
self.model_type = model_type
self.main_model = main_model
self.aux_model = aux_model
else:
raise NotImplementedError
def get_params(self, deep=True):
return {"model_type": self.model_type, "config": self.config, "main_model": self.main_model,
"aux_model": self.aux_model, "device": self.device, "verbose": self.verbose}
def _main_model_loss(self, y_true, y_pred_mu, y_pred_log_var, y_pred_log_var_aux):
r = torch.abs(y_true - y_pred_mu)
# + 0.5 * y_pred_log_var +
loss = torch.mean(0.5 * torch.exp(-y_pred_log_var) * r ** 2) + \\
self.config["lambda_match"] * torch.mean(torch.abs(torch.exp(0.5 * y_pred_log_var) - torch.exp(0.5 * y_pred_log_var_aux)))
return loss
def _aux_model_loss(self, y_true, y_pred_mu, y_pred_log_var_aux):
deltal = deltau = 2.0 * torch.exp(0.5 * y_pred_log_var_aux)
upper = y_pred_mu + deltau
lower = y_pred_mu - deltal
width = upper - lower
r = torch.abs(y_true - y_pred_mu)
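        # sigmoid((y - lower) * (upper - y) * 1e5) is a sharp, differentiable
        # surrogate for the indicator 1[lower <= y <= upper]; its batch mean
        # estimates empirical coverage, pulled towards 'calibration_alpha'.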
emce = torch.mean(torch.sigmoid((y_true - lower) * (upper - y_true) * 100000))
loss_emce = torch.abs(self.config["calibration_alpha"]-emce)
loss_noise = torch.mean(torch.abs(0.5 * width - r))
loss_sharpness = torch.mean(torch.abs(upper - y_true)) + torch.mean(torch.abs(lower - y_true))
#print(emce)
return loss_emce + self.config["lambda_noise"] * loss_noise + self.config["lambda_sharpness"] * loss_sharpness
def fit(self, X, y):
""" Fit the Auxiliary Interval Predictor model.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
X = torch.from_numpy(X).float().to(self.device)
y = torch.from_numpy(y).float().to(self.device)
dataset_loader = DataLoader(
TensorDataset(X,y),
batch_size=self.config["batch_size"]
)
optimizer_main_model = torch.optim.Adam(self.main_model.parameters(), lr=self.config["lr"])
optimizer_aux_model = torch.optim.Adam(self.aux_model.parameters(), lr=self.config["lr"])
for it in range(self.config["num_outer_iters"]):
# Train the main model
for epoch in range(self.config["num_main_iters"]):
avg_mean_model_loss = 0.0
for batch_x, batch_y in dataset_loader:
self.main_model.train()
self.aux_model.eval()
batch_y_pred_log_var_aux = self.aux_model(batch_x)
batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x)
main_loss = self._main_model_loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var, batch_y_pred_log_var_aux)
optimizer_main_model.zero_grad()
main_loss.backward()
optimizer_main_model.step()
avg_mean_model_loss += main_loss.item()/len(dataset_loader)
if self.verbose:
print("Iter: {}, Epoch: {}, main_model_loss = {}".format(it, epoch, avg_mean_model_loss))
# Train the auxiliary model
for epoch in range(self.config["num_aux_iters"]):
avg_aux_model_loss = 0.0
for batch_x, batch_y in dataset_loader:
self.aux_model.train()
self.main_model.eval()
batch_y_pred_log_var_aux = self.aux_model(batch_x)
batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x)
aux_loss = self._aux_model_loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var_aux)
optimizer_aux_model.zero_grad()
aux_loss.backward()
optimizer_aux_model.step()
avg_aux_model_loss += aux_loss.item() / len(dataset_loader)
if self.verbose:
print("Iter: {}, Epoch: {}, aux_model_loss = {}".format(it, epoch, avg_aux_model_loss))
return self
def predict(self, X, return_dists=False):
"""
Obtain predictions for the test points.
In addition to the mean and lower/upper bounds, also returns full predictive distribution (return_dists=True).
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
Returns:
            namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
Only returned when `return_dists` is True.
"""
self.main_model.eval()
X = torch.from_numpy(X).float().to(self.device)
dataset_loader = DataLoader(
X,
batch_size=self.config["batch_size"]
)
y_mean_list = []
y_log_var_list = []
for batch_x in dataset_loader:
batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x)
y_mean_list.append(batch_y_pred_mu.data.cpu().numpy())
y_log_var_list.append(batch_y_pred_log_var.data.cpu().numpy())
y_mean = np.concatenate(y_mean_list)
y_log_var = np.concatenate(y_log_var_list)
y_std = np.sqrt(np.exp(y_log_var))
y_lower = y_mean - 2.0*y_std
y_upper = y_mean + 2.0*y_std
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
if return_dists:
dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_dists',))
res = Result(*res, y_dists=dists)
return res
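# --- Usage sketch (illustrative, not part of the library) ---
# Fits the built-in "mlp" variant on toy regression data. The config keys mirror
# the ones read by __init__/fit above; the specific values are assumptions, not
# tuned defaults, and `_demo_auxiliary_interval_predictor` is a hypothetical helper.
def _demo_auxiliary_interval_predictor():
    import numpy as np
    import torch
    X = np.random.randn(128, 4).astype(np.float32)
    y = (X.sum(axis=1, keepdims=True) + 0.1 * np.random.randn(128, 1)).astype(np.float32)
    config = {"num_features": 4, "num_outputs": 1, "num_hidden": 32,
              "batch_size": 32, "lr": 1e-3,
              "num_outer_iters": 2, "num_main_iters": 5, "num_aux_iters": 5,
              "lambda_match": 1.0, "lambda_noise": 1.0, "lambda_sharpness": 1.0,
              "calibration_alpha": 0.95}
    model = AuxiliaryIntervalPredictor(model_type="mlp", config=config,
                                       device=torch.device("cpu"), verbose=False)
    model.fit(X, y)
    res = model.predict(X)
    print(res.y_mean.shape, res.y_lower.shape, res.y_upper.shape)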
<s> from .auxiliary_interval_predictor import AuxiliaryIntervalPredictor
<s><s><s> import torch
import torch.nn.functional as F
from uq360.models.noise_models.heteroscedastic_noise_models import GaussianNoise
class GaussianNoiseMLPNet(torch.nn.Module):
    def __init__(self, num_features, num_outputs, num_hidden):
super(GaussianNoiseMLPNet, self).__init__()
self.fc = torch.nn.Linear(num_features, num_hidden)
self.fc_mu = torch.nn.Linear(num_hidden, num_outputs)
self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs)
self.noise_layer = GaussianNoise()
def forward(self, x):
x = F.relu(self.fc(x))
mu = self.fc_mu(x)
log_var = self.fc_log_var(x)
return mu, log_var
def loss(self, y_true=None, mu_pred=None, log_var_pred=None):
        return self.noise_layer.loss(y_true, mu_pred, log_var_pred, reduce_mean=True)
<s> """
Contains implementations of various Bayesian layers
"""
import numpy as np
import torch
import torch.nn.functional as F
from torch.nn import Parameter
from uq360.models.bayesian_neural_networks.layer_utils import InvGammaHalfCauchyLayer, InvGammaLayer
td = torch.distributions
def reparam(mu, logvar, do_sample=True, mc_samples=1):
if do_sample:
std = torch.exp(0.5 * logvar)
eps = torch.FloatTensor(std.size()).normal_()
sample = mu + eps * std
        for _ in np.arange(1, mc_samples):
            eps = torch.FloatTensor(std.size()).normal_()  # draw fresh noise for each MC sample
            sample += mu + eps * std
return sample / mc_samples
else:
return mu
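# Illustrative check (a sketch added here, not original code): reparam draws
# w = mu + eps * exp(0.5 * logvar) with eps ~ N(0, I), so do_sample=False must
# return the mean unchanged.
def _demo_reparam():
    mu = torch.zeros(3)
    logvar = torch.log(torch.full((3,), 0.25))  # std = 0.5
    assert torch.equal(reparam(mu, logvar, do_sample=False), mu)
    print(reparam(mu, logvar, do_sample=True))  # a single stochastic draw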
class BayesianLinearLayer(torch.nn.Module):
"""
Affine layer with N(0, v/H) or N(0, user specified v) priors on weights and
fully factorized variational Gaussian approximation
"""
def __init__(self, in_features, out_features, cuda=False, init_weight=None, init_bias=None, prior_stdv=None):
super(BayesianLinearLayer, self).__init__()
self.cuda = cuda
self.in_features = in_features
self.out_features = out_features
# weight mean params
self.weights = Parameter(torch.Tensor(out_features, in_features))
self.bias = Parameter(torch.Tensor(out_features))
# weight variance params
self.weights_logvar = Parameter(torch.Tensor(out_features, in_features))
self.bias_logvar = Parameter(torch.Tensor(out_features))
# numerical stability
self.fudge_factor = 1e-8
if not prior_stdv:
# We will use a N(0, 1/num_inputs) prior over weights
self.prior_stdv = torch.FloatTensor([1. / np.sqrt(self.weights.size(1))])
else:
self.prior_stdv = torch.FloatTensor([prior_stdv])
# self.prior_stdv = torch.Tensor([1. / np.sqrt(1e+3)])
self.prior_mean = torch.FloatTensor([0.])
# for Bias use a prior of N(0, 1)
self.prior_bias_stdv = torch.FloatTensor([1.])
self.prior_bias_mean = torch.FloatTensor([0.])
# init params either random or with pretrained net
self.init_parameters(init_weight, init_bias)
def init_parameters(self, init_weight, init_bias):
# init means
if init_weight is not None:
self.weights.data = torch.Tensor(init_weight)
else:
            self.weights.data.normal_(0, float(self.prior_stdv.numpy()[0]))
if init_bias is not None:
self.bias.data = torch.Tensor(init_bias)
else:
self.bias.data.normal_(0, 1)
# init variances
self.weights_logvar.data.normal_(-9, 1e-2)
self.bias_logvar.data.normal_(-9, 1e-2)
def forward(self, x, do_sample=True, scale_variances=False):
# local reparameterization trick
mu_activations = F.linear(x, self.weights, self.bias)
var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp())
if scale_variances:
activ = reparam(mu_activations, var_activations.log() - np.log(self.in_features), do_sample=do_sample)
else:
activ = reparam(mu_activations, var_activations.log(), do_sample=do_sample)
return activ
def kl(self):
"""
KL divergence (q(W) || p(W))
:return:
"""
weights_logvar = self.weights_logvar
        kld_weights = self.prior_stdv.log() - weights_logvar.mul(0.5) + \
            (weights_logvar.exp() + (self.weights.pow(2) - self.prior_mean)) / (2 * self.prior_stdv.pow(2)) - 0.5
        kld_bias = self.prior_bias_stdv.log() - self.bias_logvar.mul(0.5) + \
            (self.bias_logvar.exp() + (self.bias.pow(2) - self.prior_bias_mean)) / (2 * self.prior_bias_stdv.pow(2)) - 0.5
return kld_weights.sum() + kld_bias.sum()
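# Minimal sketch (illustrative, hypothetical helper): one stochastic forward pass
# through the layer via the local reparameterization trick, plus the KL term that
# would enter an ELBO objective.
def _demo_bayesian_linear_layer():
    layer = BayesianLinearLayer(in_features=4, out_features=2)
    x = torch.randn(8, 4)
    activ = layer(x, do_sample=True)  # sampled pre-activations, shape (8, 2)
    kl = layer.kl()                   # KL(q(W) || p(W)), a scalar
    print(activ.shape, float(kl))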
class HorseshoeLayer(BayesianLinearLayer):
"""
Uses non-centered parametrization. w_k = v*tau_k*beta_k where k indexes an output unit and w_k and beta_k
are vectors of all weights incident into the unit
"""
def __init__(self, in_features, out_features, cuda=False, scale=1.):
super(HorseshoeLayer, self).__init__(in_features, out_features)
self.cuda = cuda
self.in_features = in_features
self.out_features = out_features
self.nodescales = InvGammaHalfCauchyLayer(out_features=out_features, b=1.)
self.layerscale = InvGammaHalfCauchyLayer(out_features=1, b=scale)
# prior on beta is N(0, I) when employing non centered parameterization
self.prior_stdv = torch.Tensor([1])
self.prior_mean = torch.Tensor([0.])
def forward(self, x, do_sample=True, debug=False, eps_scale=None, eps_w=None):
# At a particular unit k, preactivation_sample = scale_sample * pre_activation_sample
# sample scales
scale_mean = 0.5 * (self.nodescales.mu + self.layerscale.mu)
scale_var = 0.25 * (self.nodescales.log_sigma.exp() ** 2 + self.layerscale.log_sigma.exp() ** 2)
scale_sample = reparam(scale_mean, scale_var.log(), do_sample=do_sample).exp()
# sample preactivations
mu_activations = F.linear(x, self.weights, self.bias)
var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp())
activ_sample = reparam(mu_activations, var_activations.log(), do_sample=do_sample)
return scale_sample * activ_sample
def kl(self):
return super(HorseshoeLayer, self).kl() + self.nodescales.kl() + self.layerscale.kl()
def fixed_point_updates(self):
self.nodescales.fixed_point_updates()
self.layerscale.fixed_point_updates()
class RegularizedHorseshoeLayer(HorseshoeLayer):
"""
Uses the regularized Horseshoe distribution. The regularized Horseshoe soft thresholds the tails of the Horseshoe.
For all weights w_k incident upon node k in the layer we have:
w_k ~ N(0, (tau_k * v)^2 I) N(0, c^2 I), c^2 ~ InverseGamma(c_a, b).
c^2 controls the scale of the thresholding. As c^2 -> infinity, the regularized Horseshoe -> Horseshoe.
"""
def __init__(self, in_features, out_features, cuda=False, scale=1., c_a=2., c_b=6.):
super(RegularizedHorseshoeLayer, self).__init__(in_features, out_features, cuda=cuda, scale=scale)
self.c = InvGammaLayer(a=c_a, b=c_b)
def forward(self, x, do_sample=True, **kwargs):
# At a particular unit k, preactivation_sample = scale_sample * pre_activation_sample
# sample regularized scales
scale_mean = self.nodescales.mu + self.layerscale.mu
scale_var = self.nodescales.log_sigma.exp() ** 2 + self.layerscale.log_sigma.exp() ** 2
scale_sample = reparam(scale_mean, scale_var.log(), do_sample=do_sample).exp()
c_sample = reparam(self.c.mu, 2 * self.c.log_sigma, do_sample=do_sample).exp()
regularized_scale_sample = (c_sample * scale_sample) / (c_sample + scale_sample)
# sample preactivations
mu_activations = F.linear(x, self.weights, self.bias)
var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp())
activ_sample = reparam(mu_activations, var_activations.log(), do_sample=do_sample)
return torch.sqrt(regularized_scale_sample) * activ_sample
def kl(self):
return super(RegularizedHorseshoeLayer, self).kl() + self.c.kl()
class NodeSpecificRegularizedHorseshoeLayer(RegularizedHorseshoeLayer):
"""
Uses the regularized Horseshoe distribution. The regularized Horseshoe soft thresholds the tails of the Horseshoe.
For all weights w_k incident upon node k in the layer we have:
w_k ~ N(0, (tau_k * v)^2 I) N(0, c_k^2 I), c_k^2 ~ InverseGamma(a, b).
c_k^2 controls the scale of the thresholding. As c_k^2 -> infinity, the regularized Horseshoe -> Horseshoe
Note that we now have a per-node c_k.
"""
def __init__(self, in_features, out_features, cuda=False, scale=1., c_a=2., c_b=6.):
super(NodeSpecificRegularizedHorseshoeLayer, self).__init__(in_features, out_features, cuda=cuda, scale=scale)
self.c = InvGammaLayer(a=c_a, b=c_b, out_features=out_features)
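# Shape/interface sketch (illustrative, hypothetical helper): the three Horseshoe
# variants share the forward/kl/fixed_point_updates interface and differ only in
# how the node and layer scales are regularized.
def _demo_horseshoe_layers():
    x = torch.randn(8, 4)
    for cls in (HorseshoeLayer, RegularizedHorseshoeLayer,
                NodeSpecificRegularizedHorseshoeLayer):
        layer = cls(4, 3)
        out = layer(x, do_sample=True)
        layer.fixed_point_updates()  # closed-form refresh of the auxiliary q(lambda)
        print(cls.__name__, tuple(out.shape), float(layer.kl()))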
<s><s> """
Contains implementations of various utilities used by Horseshoe Bayesian layers
"""
import numpy as np
import torch
from torch.nn import Parameter
td = torch.distributions
gammaln = torch.lgamma
def diag_gaussian_entropy(log_std, D):
    return 0.5 * D * (1.0 + np.log(2 * np.pi)) + torch.sum(log_std)
def inv_gamma_entropy(a, b):
return torch.sum(a + torch.log(b) + torch.lgamma(a) - (1 + a) * torch.digamma(a))
def log_normal_entropy(log_std, mu, D):
return torch.sum(log_std + mu + 0.5) + (D / 2) * np.log(2 * np.pi)
class InvGammaHalfCauchyLayer(torch.nn.Module):
"""
Uses the inverse Gamma parameterization of the half-Cauchy distribution.
a ~ C^+(0, b) <==> a^2 ~ IGamma(0.5, 1/lambda), lambda ~ IGamma(0.5, 1/b^2), where lambda is an
auxiliary latent variable.
Uses a factorized variational approximation q(ln a^2)q(lambda) = N(mu, sigma^2) IGamma(ahat, bhat).
This layer places a half Cauchy prior on the scales of each output node of the layer.
"""
def __init__(self, out_features, b):
"""
        :param out_features: number of output nodes in the layer.
:param b: scale of the half Cauchy
"""
super(InvGammaHalfCauchyLayer, self).__init__()
self.b = b
self.out_features = out_features
# variational parameters for q(ln a^2)
self.mu = Parameter(torch.FloatTensor(out_features))
self.log_sigma = Parameter(torch.FloatTensor(out_features))
# self.log_sigma = torch.FloatTensor(out_features)
# variational parameters for q(lambda). These will be updated via fixed point updates, hence not parameters.
self.ahat = torch.FloatTensor([1.]) # The posterior parameter is always 1.
self.bhat = torch.ones(out_features) * (1.0 / self.b ** 2)
self.const = torch.FloatTensor([0.5])
self.initialize_from_prior()
def initialize_from_prior(self):
"""
Initializes variational parameters by sampling from the prior.
"""
# sample from half cauchy and log to initialize the mean of the log normal
sample = np.abs(self.b * (np.random.randn(self.out_features) / np.random.randn(self.out_features)))
self.mu.data = torch.FloatTensor(np.log(sample))
self.log_sigma.data = torch.FloatTensor(np.random.randn(self.out_features) - 10.)
def expectation_wrt_prior(self):
"""
Computes E[ln p(a^2 | lambda)] + E[ln p(lambda)]
"""
expected_a_given_lambda = -gammaln(self.const) - 0.5 * (torch.log(self.bhat) - torch.digamma(self.ahat)) + (
-0.5 - 1.) * self.mu - torch.exp(-self.mu + 0.5 * self.log_sigma.exp() ** 2) * (self.ahat / self.bhat)
        expected_lambda = -gammaln(self.const) - 2 * 0.5 * np.log(self.b) + (-self.const - 1.) * (
            torch.log(self.bhat) - torch.digamma(self.ahat)) - (1. / self.b ** 2) * (self.ahat / self.bhat)
return torch.sum(expected_a_given_lambda) + torch.sum(expected_lambda)
def entropy(self):
"""
Computes entropy of q(ln a^2) and q(lambda)
"""
return self.entropy_lambda() + self.entropy_a2()
def entropy_lambda(self):
return inv_gamma_entropy(self.ahat, self.bhat)
def entropy_a2(self):
return log_normal_entropy(self.log_sigma, self.mu, self.out_features)
def kl(self):
"""
Computes KL(q(ln(a^2)q(lambda) || IG(a^2 | 0.5, 1/lambda) IG(lambda | 0.5, 1/b^2))
"""
return -self.expectation_wrt_prior() - self.entropy()
def fixed_point_updates(self):
# update lambda moments
self.bhat = torch.exp(-self.mu + 0.5 * self.log_sigma.exp() ** 2) + (1. / self.b ** 2)
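# Minimal sketch (illustrative, hypothetical helper): during training, gradient
# steps on (mu, log_sigma) alternate with this closed-form update of q(lambda)'s
# rate parameter.
def _demo_invgamma_half_cauchy():
    scales = InvGammaHalfCauchyLayer(out_features=5, b=1.0)
    kl_before = float(scales.kl())
    scales.fixed_point_updates()  # refresh bhat given the current q(ln a^2)
    print(kl_before, float(scales.kl()))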
class InvGammaLayer(torch.nn.Module):
"""
Approximates the posterior of c^2 with prior IGamma(c^2 | a , b)
using a log Normal approximation q(ln c^2) = N(mu, sigma^2)
"""
def __init__(self, a, b, out_features=1):
super(InvGammaLayer, self).__init__()
self.a = torch.FloatTensor([a])
self.b = torch.FloatTensor([b])
# variational parameters for q(ln c^2)
self.mu = Parameter(torch.FloatTensor(out_features))
self.log_sigma = Parameter(torch.FloatTensor(out_features))
self.out_features = out_features
self.initialize_from_prior()
def initialize_from_prior(self):
"""
Initializes variational parameters by sampling from the prior.
"""
self.mu.data = torch.log(self.b / (self.a + 1) * torch.ones(self.out_features)) # initialize at the mode
self.log_sigma.data = torch.FloatTensor(np.random.randn(self.out_features) - 10.)
def expectation_wrt_prior(self):
"""
Computes E[ln p(c^2 | a, b)]
"""
# return self.c_a * np.log(self.c_b) - gammaln(self.c_a) + (
# - self.c_a - 1) * c_mu - self.c_b * Ecinv
        return self.a * torch.log(self.b) - gammaln(self.a) + (- self.a - 1) \
* self.mu - self.b * torch.exp(-self.mu + 0.5 * self.log_sigma.exp() ** 2)
def entropy(self):
return log_normal_entropy(self.log_sigma, self.mu, 1)
def kl(self):
"""
Computes KL(q(ln(c^2) || IG(c^2 | a, b))
"""
return -self.expectation_wrt_prior().sum() - self.entropy()
<s> import numpy as np
import torch
from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseFixedPrecision
def compute_test_ll(y_test, y_pred_samples, std_y=1.):
"""
Computes test log likelihoods = (1 / Ntest) * \\sum_n p(y_n | x_n, D_train)
:param y_test: True y
:param y_pred_samples: y^s = f(x_test, w^s); w^s ~ q(w). S x Ntest, where S is the number of samples
q(w) is either a trained variational posterior or an MCMC approximation to p(w | D_train)
:param std_y: True std of y (assumed known)
"""
S, _ = y_pred_samples.shape
noise = GaussianNoiseFixedPrecision(std_y=std_y)
ll = noise.loss(y_pred=y_pred_samples, y_true=y_test.unsqueeze(dim=0), reduce_sum=False)
ll = torch.logsumexp(ll, dim=0) - np.log(S) # mean over num samples
return torch.mean(ll) # mean over test points
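# Usage sketch (illustrative, hypothetical helper): y_pred_samples stacks S
# posterior predictive draws, one row per draw, so the estimate is a
# log-mean-exp over samples averaged over test points. It relies on the noise
# model exposing per-point log likelihoods via reduce_sum=False.
def _demo_compute_test_ll():
    S, n_test = 20, 50
    y_test = torch.randn(n_test)
    y_pred_samples = y_test.unsqueeze(0) + 0.1 * torch.randn(S, n_test)
    print(float(compute_test_ll(y_test, y_pred_samples, std_y=1.)))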
<s> from abc import ABC
import numpy as np
import torch
from torch import nn
from uq360.models.bayesian_neural_networks.layers import HorseshoeLayer, BayesianLinearLayer, RegularizedHorseshoeLayer
from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseGammaPrecision
td = torch.distributions
class HshoeBNN(nn.Module, ABC):
"""
Bayesian neural network with Horseshoe layers.
"""
def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu', num_layers=1,
hshoe_scale=1e-1, use_reg_hshoe=False):
if use_reg_hshoe:
layer = RegularizedHorseshoeLayer
else:
layer = HorseshoeLayer
super(HshoeBNN, self).__init__()
self.num_layers = num_layers
if activation_type == 'relu':
# activation
self.activation = nn.ReLU()
elif activation_type == 'tanh':
self.activation = nn.Tanh()
else:
print("Activation Type not supported")
self.fc_hidden = []
self.fc1 = layer(ip_dim, num_nodes, scale=hshoe_scale)
for _ in np.arange(self.num_layers - 1):
self.fc_hidden.append(layer(num_nodes, num_nodes))
self.fc_out = BayesianLinearLayer(num_nodes, op_dim)
self.noise_layer = None
def forward(self, x, do_sample=True):
x = self.fc1(x, do_sample=do_sample)
x = self.activation(x)
for layer in self.fc_hidden:
x = layer(x, do_sample=do_sample)
x = self.activation(x)
return self.fc_out(x, do_sample=do_sample, scale_variances=True)
def kl_divergence_w(self):
kld = self.fc1.kl() + self.fc_out.kl()
for layer in self.fc_hidden:
kld += layer.kl()
return kld
def fixed_point_updates(self):
if hasattr(self.fc1, 'fixed_point_updates'):
self.fc1.fixed_point_updates()
if hasattr(self.fc_out, 'fixed_point_updates'):
self.fc_out.fixed_point_updates()
for layer in self.fc_hidden:
if hasattr(layer, 'fixed_point_updates'):
layer.fixed_point_updates()
def prior_predictive_samples(self, n_sample=100):
n_eval = 1000
x = torch.linspace(-2, 2, n_eval)[:, np.newaxis]
y = np.zeros([n_sample, n_eval])
for i in np.arange(n_sample):
y[i] = self.forward(x).data.numpy().ravel()
return x.data.numpy(), y
### get and set weights ###
def get_weights(self):
assert len(self.fc_hidden) == 0 # only works for one layer networks.
weight_dict = {}
weight_dict['layerip_means'] = torch.cat([self.fc1.weights, self.fc1.bias.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerip_logvar'] = torch.cat([self.fc1.weights_logvar, self.fc1.bias_logvar.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerop_means'] = torch.cat([self.fc_out.weights, self.fc_out.bias.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerop_logvar'] = torch.cat([self.fc_out.weights_logvar, self.fc_out.bias_logvar.unsqueeze(1)], dim=1).data.numpy()
return weight_dict
def set_weights(self, weight_dict):
assert len(self.fc_hidden) == 0 # only works for one layer networks.
to_param = lambda x: nn.Parameter(torch.Tensor(x))
self.fc1.weights = to_param(weight_dict['layerip_means'][:, :-1])
        self.fc1.weights_logvar = to_param(weight_dict['layerip_logvar'][:, :-1])
self.fc1.bias = to_param(weight_dict['layerip_means'][:, -1])
self.fc1.bias_logvar = to_param(weight_dict['layerip_logvar'][:, -1])
self.fc_out.weights = to_param(weight_dict['layerop_means'][:, :-1])
        self.fc_out.weights_logvar = to_param(weight_dict['layerop_logvar'][:, :-1])
self.fc_out.bias = to_param(weight_dict['layerop_means'][:, -1])
self.fc_out.bias_logvar = to_param(weight_dict['layerop_logvar'][:, -1])
class HshoeRegressionNet(HshoeBNN, ABC):
"""
Horseshoe net with N(y_true | f(x, w), \\lambda^-1); \\lambda ~ Gamma(a, b) likelihoods.
"""
def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu',
num_layers=1, hshoe_scale=1e-5, use_reg_hshoe=False):
super(HshoeRegressionNet, self).__init__(ip_dim=ip_dim, op_dim=op_dim,
num_nodes=num_nodes, activation_type=activation_type,
num_layers=num_layers,
hshoe_scale=hshoe_scale,
use_reg_hshoe=use_reg_hshoe)
self.noise_layer = GaussianNoiseGammaPrecision(a0=6., b0=6.)
def likelihood(self, x=None, y=None):
out = self.forward(x)
return -self.noise_layer.loss(y_pred=out, y_true=y)
def neg_elbo(self, num_batches, x=None, y=None):
        # scale the KL terms by number of batches so that the minibatch elbo is an unbiased estimate of the true elbo.
Elik = self.likelihood(x, y)
neg_elbo = (self.kl_divergence_w() + self.noise_layer.kl()) / num_batches - Elik
return neg_elbo
def mse(self, x, y):
"""
        scaled sum of squared errors, weighted by the expected noise precision
"""
E_noise_precision = 1. / self.noise_layer.get_noise_var()
return (0.5 * E_noise_precision * (self.forward(x, do_sample=False) - y)**2).sum()
def get_noise_var(self):
return self.noise_layer.get_noise_var()
class HshoeClassificationNet(HshoeBNN, ABC):
"""
Horseshoe net with Categorical(y_true | f(x, w)) likelihoods. Use for classification.
"""
def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu',
num_layers=1, hshoe_scale=1e-5, use_reg_hshoe=False):
super(HshoeClassificationNet, self).__init__(ip_dim=ip_dim, op_dim=op_dim,
num_nodes=num_nodes, activation_type=activation_type,
num_layers=num_layers,
hshoe_scale=hshoe_scale,
use_reg_hshoe=use_reg_hshoe)
self.noise_layer = torch.nn.CrossEntropyLoss(reduction='sum')
def likelihood(self, x=None, y=None):
out = self.forward(x)
return -self.noise_layer(out, y)
def neg_elbo(self, num_batches, x=None, y=None):
        # scale the KL terms by number of batches so that the minibatch elbo is an unbiased estimate of the true elbo.
Elik = self.likelihood(x, y)
neg_elbo = (self.kl_divergence_w()) / num_batches - Elik
return neg_elbo
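# Training sketch (illustrative; the hyperparameters and `_demo_hshoe_regression`
# helper are assumptions): minimize the negative ELBO with Adam and interleave
# the Horseshoe fixed point updates.
def _demo_hshoe_regression():
    net = HshoeRegressionNet(ip_dim=1, op_dim=1, num_nodes=20, num_layers=1)
    x = torch.linspace(-2, 2, 64)[:, None]
    y = torch.sin(3 * x) + 0.1 * torch.randn(64, 1)
    opt = torch.optim.Adam(net.parameters(), lr=1e-2)
    for _ in range(100):
        opt.zero_grad()
        loss = net.neg_elbo(num_batches=1, x=x, y=y)
        loss.backward()
        opt.step()
        net.fixed_point_updates()  # closed-form updates for the scale posteriors
    print(float(loss), float(net.get_noise_var()))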
<s> from abc import ABC
import torch
from torch import nn
from uq360.models.bayesian_neural_networks.layers import BayesianLinearLayer
from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseGammaPrecision
import numpy as np
td = torch.distributions
class BayesianNN(nn.Module, ABC):
"""
Bayesian neural network with zero mean Gaussian priors over weights.
"""
def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50,
activation_type='relu', num_layers=1):
super(BayesianNN, self).__init__()
self.num_layers = num_layers
if activation_type == 'relu':
# activation
self.activation = nn.ReLU()
elif activation_type == 'tanh':
self.activation = nn.Tanh()
else:
print("Activation Type not supported")
self.fc_hidden = []
self.fc1 = layer(ip_dim, num_nodes,)
for _ in np.arange(self.num_layers - 1):
self.fc_hidden.append(layer(num_nodes, num_nodes, ))
self.fc_out = layer(num_nodes, op_dim, )
self.noise_layer = None
def forward(self, x, do_sample=True):
x = self.fc1(x, do_sample=do_sample)
x = self.activation(x)
for layer in self.fc_hidden:
            x = layer(x, do_sample=do_sample)
x = self.activation(x)
return self.fc_out(x, do_sample=do_sample, scale_variances=True)
def kl_divergence_w(self):
kld = self.fc1.kl() + self.fc_out.kl()
for layer in self.fc_hidden:
kld += layer.kl()
return kld
def prior_predictive_samples(self, n_sample=100):
n_eval = 1000
x = torch.linspace(-2, 2, n_eval)[:, np.newaxis]
y = np.zeros([n_sample, n_eval])
for i in np.arange(n_sample):
y[i] = self.forward(x).data.numpy().ravel()
return x.data.numpy(), y
### get and set weights ###
def get_weights(self):
assert len(self.fc_hidden) == 0 # only works for one layer networks.
weight_dict = {}
weight_dict['layerip_means'] = torch.cat([self.fc1.weights, self.fc1.bias.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerip_logvar'] = torch.cat([self.fc1.weights_logvar, self.fc1.bias_logvar.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerop_means'] = torch.cat([self.fc_out.weights, self.fc_out.bias.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerop_logvar'] = torch.cat([self.fc_out.weights_logvar, self.fc_out.bias_logvar.unsqueeze(1)], dim=1).data.numpy()
return weight_dict
def set_weights(self, weight_dict):
assert len(self.fc_hidden) == 0 # only works for one layer networks.
to_param = lambda x: nn.Parameter(torch.Tensor(x))
self.fc1.weights = to_param(weight_dict['layerip_means'][:, :-1])
        self.fc1.weights_logvar = to_param(weight_dict['layerip_logvar'][:, :-1])
self.fc1.bias = to_param(weight_dict['layerip_means'][:, -1])
self.fc1.bias_logvar = to_param(weight_dict['layerip_logvar'][:, -1])
self.fc_out.weights = to_param(weight_dict['layerop_means'][:, :-1])
        self.fc_out.weights_logvar = to_param(weight_dict['layerop_logvar'][:, :-1])
self.fc_out.bias = to_param(weight_dict['layerop_means'][:, -1])
self.fc_out.bias_logvar = to_param(weight_dict['layerop_logvar'][:, -1])
class BayesianRegressionNet(BayesianNN, ABC):
"""
Bayesian neural net with N(y_true | f(x, w), \\lambda^-1); \\lambda ~ Gamma(a, b) likelihoods.
"""
def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu',
num_layers=1):
super(BayesianRegressionNet, self).__init__(layer=layer, ip_dim=ip_dim, op_dim=op_dim,
num_nodes=num_nodes, activation_type=activation_type,
num_layers=num_layers,
)
self.noise_layer = GaussianNoiseGammaPrecision(a0=6., b0=6.)
def likelihood(self, x=None, y=None):
out = self.forward(x)
return -self.noise_layer.loss(y_pred=out, y_true=y)
def neg_elbo(self, num_batches, x=None, y=None):
        # scale the KL terms by number of batches so that the minibatch elbo is an unbiased estimate of the true elbo.
Elik = self.likelihood(x, y)
neg_elbo = (self.kl_divergence_w() + self.noise_layer.kl()) / num_batches - Elik
return neg_elbo
def mse(self, x, y):
"""
        scaled sum of squared errors, weighted by the expected noise precision
"""
E_noise_precision = 1. / self.noise_layer.get_noise_var()
return (0.5 * E_noise_precision * (self.forward(x, do_sample=False) - y)**2).sum()
def get_noise_var(self):
return self.noise_layer.get_noise_var()
class BayesianClassificationNet(BayesianNN, ABC):
"""
Bayesian neural net with Categorical(y_true | f(x, w)) likelihoods. Use for classification.
"""
def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu',
num_layers=1):
super(BayesianClassificationNet, self).__init__(layer=layer, ip_dim=ip_dim, op_dim=op_dim,
num_nodes=num_nodes, activation_type=activation_type,
num_layers=num_layers)
self.noise_layer = torch.nn.CrossEntropyLoss(reduction='sum')
def likelihood(self, x=None, y=None):
out = self.forward(x)
return -self.noise_layer(out, y)
def neg_elbo(self, num_batches, x=None, y=None):
        # scale the KL terms by number of batches so that the minibatch elbo is an unbiased estimate of the true elbo.
Elik = self.likelihood(x, y)
neg_elbo = self.kl_divergence_w() / num_batches - Elik
return neg_elbo
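# Training sketch (illustrative, hypothetical helper): the classification net
# expects int64 class indices, since its likelihood is CrossEntropyLoss over logits.
def _demo_bayesian_classification():
    net = BayesianClassificationNet(ip_dim=2, op_dim=2, num_nodes=20)
    x = torch.randn(100, 2)
    y = (x.sum(dim=1) > 0).long()  # two linearly separable classes
    opt = torch.optim.Adam(net.parameters(), lr=1e-2)
    for _ in range(100):
        opt.zero_grad()
        loss = net.neg_elbo(num_batches=1, x=x, y=y)
        loss.backward()
        opt.step()
    print(float(loss))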
<s><s> import math
import numpy as np
import torch
from scipy.special import gammaln
from uq360.models.noise_models.noisemodel import AbstractNoiseModel
from torch.nn import Parameter
td = torch.distributions
def transform(a):
return torch.log(1 + torch.exp(a))
class GaussianNoise(torch.nn.Module, AbstractNoiseModel):
"""
N(y_true | f_\\mu(x, w), f_\\sigma^2(x, w))
"""
def __init__(self, cuda=False):
super(GaussianNoise, self).__init__()
self.cuda = cuda
self.const = torch.log(torch.FloatTensor([2 * math.pi]))
def loss(self, y_true=None, mu_pred=None, log_var_pred=None, reduce_mean=True):
"""
computes -1 * ln N (y_true | mu_pred, softplus(log_var_pred))
:param y_true:
:param mu_pred:
:param log_var_pred:
:return:
"""
var_pred = transform(log_var_pred)
ll = -0.5 * self.const - 0.5 * torch.log(var_pred) - 0.5 * (1. / var_pred) * ((mu_pred - y_true) ** 2)
if reduce_mean:
return -ll.mean(dim=0)
else:
return -ll.sum(dim=0)
def get_noise_var(self, log_var_pred):
return transform(log_var_pred)
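# Usage sketch (illustrative, hypothetical helper): this is the heteroscedastic
# NLL used by mean/log-variance networks such as GaussianNoiseMLPNet earlier in
# this dump. Note log_var_pred goes through a softplus-style transform, so it is
# a pre-activation rather than an exact log variance.
def _demo_gaussian_noise():
    noise = GaussianNoise()
    y = torch.randn(16, 1)
    mu = torch.zeros(16, 1)
    log_var = torch.zeros(16, 1)  # transform(0) = ln 2, i.e. variance ~ 0.693
    print(noise.loss(y_true=y, mu_pred=mu, log_var_pred=log_var))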
<s> import math
import numpy as np
import torch
from scipy.special import gammaln
from uq360.models.noise_models.noisemodel import AbstractNoiseModel
from torch.nn import Parameter
td = torch.distributions
def transform(a):
return torch.log(1 + torch.exp(a))
class GaussianNoiseGammaPrecision(torch.nn.Module, AbstractNoiseModel):
"""
N(y_true | f(x, w), \\lambda^-1); \\lambda ~ Gamma(a, b).
Uses a variational approximation; q(lambda) = Gamma(ahat, bhat)
"""
def __init__(self, a0=6, b0=6, cuda=False):
super(GaussianNoiseGammaPrecision, self).__init__()
self.cuda = cuda
self.a0 = a0
self.b0 = b0
self.const = torch.log(torch.FloatTensor([2 * math.pi]))
# variational parameters
self.ahat = Parameter(torch.FloatTensor([10.]))
self.bhat = Parameter(torch.FloatTensor([3.]))
def loss(self, y_pred=None, y_true=None):
"""
computes -1 * E_q(\\lambda)[ln N (y_pred | y_true, \\lambda^-1)], where q(lambda) = Gamma(ahat, bhat)
:param y_pred:
:param y_true:
:return:
"""
n = y_pred.shape[0]
ahat = transform(self.ahat)
bhat = transform(self.bhat)
        return -1 * (-0.5 * n * self.const + 0.5 * n * (torch.digamma(ahat) - torch.log(bhat)) \
- 0.5 * (ahat/bhat) * ((y_pred - y_true) ** 2).sum())
def kl(self):
ahat = transform(self.ahat)
bhat = transform(self.bhat)
        return (ahat - self.a0) * torch.digamma(ahat) - torch.lgamma(ahat) + gammaln(self.a0) + \
self.a0 * (torch.log(bhat) - np.log(self.b0)) + ahat * (self.b0 - bhat) / bhat
def get_noise_var(self):
ahat = transform(self.ahat)
bhat = transform(self.bhat)
return (bhat / ahat).data.numpy()[0]
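# Illustrative check (hypothetical helper): after the softplus-style transform,
# the expected noise variance under q(lambda) = Gamma(ahat, bhat) is bhat / ahat.
def _demo_gamma_precision_noise():
    noise = GaussianNoiseGammaPrecision(a0=6., b0=6.)
    y_pred = torch.randn(32, 1)
    y_true = y_pred + 0.1 * torch.randn(32, 1)
    print(float(noise.loss(y_pred=y_pred, y_true=y_true)), noise.get_noise_var())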
class GaussianNoiseFixedPrecision(torch.nn.Module, AbstractNoiseModel):
"""
N(y_true | f(x, w), sigma_y**2); known sigma_y
"""
def __init__(self, std_y=1., cuda=False):
super(GaussianNoiseFixedPrecision, self).__init__()
self.cuda = cuda
self.const = torch.log(torch.FloatTensor([2 * math.pi]))
self.sigma_y = std_y
    def loss(self, y_pred=None, y_true=None, reduce_sum=True):
        """
        computes -1 * ln N (y_pred | y_true, sigma_y**2)
        :param y_pred:
        :param y_true:
        :param reduce_sum: if True, return the summed negative log likelihood; if False,
            return the per-point log likelihoods (as consumed by compute_test_ll).
        :return:
        """
        ll = -0.5 * self.const - np.log(self.sigma_y) - 0.5 * (1. / self.sigma_y ** 2) * ((y_pred - y_true) ** 2)
        if reduce_sum:
            return -ll.sum(dim=0)
        return ll
def get_noise_var(self):
        return self.sigma_y ** 2
<s> import abc
import sys
# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta(str('ABC'), (), {})
class AbstractNoiseModel(ABC):
""" Abstract class. All noise models inherit from here.
"""
def __init__(self, *argv, **kwargs):
""" Initialize an AbstractNoiseModel object.
"""
@abc.abstractmethod
def loss(self, *argv, **kwargs):
""" Compute loss given predictions and groundtruth labels
"""
raise NotImplementedError
@abc.abstractmethod
def get_noise_var(self, *argv, **kwargs):
"""
Return the current estimate of noise variance
"""
raise NotImplementedError
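# Minimal sketch (illustrative, not part of the library): a concrete noise model
# only has to implement loss() and get_noise_var(); e.g. a fixed-scale Laplace
# noise model for tensor-like predictions.
import math
class _LaplaceNoiseFixedScale(AbstractNoiseModel):
    def __init__(self, b=1.0):
        self.b = b
    def loss(self, y_pred=None, y_true=None):
        # -ln Laplace(y_pred | y_true, b), summed over data points
        return (abs(y_pred - y_true) / self.b + math.log(2 * self.b)).sum()
    def get_noise_var(self):
        return 2 * self.b ** 2  # variance of Laplace(0, b)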
<s><s> import autograd
import autograd.numpy as np
import scipy.optimize
from autograd import grad
from autograd.scipy.special import logsumexp
from sklearn.cluster import KMeans
class HMM:
"""
A Hidden Markov Model with Gaussian observations with
unknown means and known precisions.
"""
def __init__(self, X, config_dict=None):
self.N, self.T, self.D = X.shape
self.K = config_dict['K'] # number of HMM states
self.I = np.eye(self.K)
self.Precision = np.zeros([self.D, self.D, self.K])
self.X = X
if config_dict['precision'] is None:
for k in np.arange(self.K):
self.Precision[:, :, k] = np.eye(self.D)
else:
self.Precision = config_dict['precision']
self.dParams_dWeights = None
self.alphaT = None # Store the final beliefs.
self.beta1 = None # store the first timestep beliefs from the beta recursion.
        self.forward_trellis = {}  # stores \alpha
        self.backward_trellis = {}  # stores \beta
def initialize_params(self, seed=1234):
np.random.seed(seed)
param_dict = {}
A = np.random.randn(self.K, self.K)
# use k-means to initialize the mean parameters
X = self.X.reshape([-1, self.D])
kmeans = KMeans(n_clusters=self.K, random_state=seed,
n_init=15).fit(X)
labels = kmeans.labels_
_, counts = np.unique(labels, return_counts=True)
pi = counts
phi = kmeans.cluster_centers_
param_dict['A'] = np.exp(A)
param_dict['pi0'] = pi
param_dict['phi'] = phi
return self.pack_params(param_dict)
def unpack_params(self, params):
param_dict = dict()
K = self.K
# For unpacking simplex parameters: have packed them as
# log(pi[:-1]) - log(pi[-1]).
unnorm_A = np.exp(np.append(params[:K**2-K].reshape(K, K-1),
np.zeros((K, 1)),
axis=1)
)
Z = np.sum(unnorm_A[:, :-1], axis=1)
unnorm_A /= Z[:, np.newaxis]
norm_A = unnorm_A / unnorm_A.sum(axis=1, keepdims=True)
param_dict['A'] = norm_A
        unnorm_pi = np.exp(np.append(params[K**2-K:K**2-1], 0.0))
Z = np.sum(unnorm_pi[:-1])
unnorm_pi /= Z
param_dict['pi0'] = unnorm_pi / unnorm_pi.sum()
param_dict['phi'] = params[K**2-K+K-1:].reshape(self.D, K)
return param_dict
def weighted_alpha_recursion(self, xseq, pi, phi, Sigma, A, wseq, store_belief=False):
"""
Computes the weighted marginal probability of the sequence xseq given parameters;
weights wseq turn on or off the emissions p(x_t | z_t) (weighting scheme B)
:param xseq: T * D
:param pi: K * 1
:param phi: D * K
:param wseq: T * 1
:param A:
:return:
"""
ll = self.log_obs_lik(xseq[:, :, np.newaxis], phi[np.newaxis, :, :], Sigma)
alpha = np.log(pi.ravel()) + wseq[0] * ll[0]
if wseq[0] == 0:
self.forward_trellis[0] = alpha[:, np.newaxis]
for t in np.arange(1, self.T):
alpha = logsumexp(alpha[:, np.newaxis] + np.log(A), axis=0) + wseq[t] * ll[t]
if wseq[t] == 0:
# store the trellis, would be used to compute the posterior z_t | x_1...x_t-1, x_t+1, ...x_T
self.forward_trellis[t] = alpha[:, np.newaxis]
if store_belief:
# store the final belief
self.alphaT = alpha
return logsumexp(alpha)
def weighted_beta_recursion(self, xseq, pi, phi, Sigma, A, wseq, store_belief=False):
"""
Runs beta recursion;
weights wseq turn on or off the emissions p(x_t | z_t) (weighting scheme B)
:param xseq: T * D
:param pi: K * 1
:param phi: D * K
:param wseq: T * 1
:param A:
:return:
"""
ll = self.log_obs_lik(xseq[:, :, np.newaxis], phi[np.newaxis, :, :], Sigma)
        beta = np.zeros_like(pi.ravel())  # log(\beta) of all ones.
max_t = ll.shape[0]
if wseq[max_t - 1] == 0:
# store the trellis, would be used to compute the posterior z_t | x_1...x_t-1, x_t+1, ...x_T
self.backward_trellis[max_t - 1] = beta[:, np.newaxis]
for i in np.arange(1, max_t):
t = max_t - i - 1
beta = logsumexp((beta + wseq[t + 1] * ll[t + 1])[np.newaxis, :] + np.log(A), axis=1)
if wseq[t] == 0:
# store the trellis, would be used to compute the posterior z_t | x_1...x_t-1, x_t+1, ...x_T
self.backward_trellis[t] = beta[:, np.newaxis]
# account for the init prob
beta = (beta + wseq[0] * ll[0]) + np.log(pi.ravel())
if store_belief:
# store the final belief
self.beta1 = beta
return logsumexp(beta)
def weighted_loss(self, params, weights):
"""
For LOOCV / IF computation within a single sequence. Uses weighted alpha recursion
:param params:
:param weights:
:return:
"""
param_dict = self.unpack_params(params)
logp = self.get_prior_contrib(param_dict)
logp = logp + self.weighted_alpha_recursion(self.X[0], param_dict['pi0'],
param_dict['phi'],
self.Precision,
param_dict['A'],
weights)
return -logp
def loss_at_missing_timesteps(self, weights, params):
"""
:param weights: zeroed out weights indicate missing values
:param params: packed parameters
:return:
"""
# empty forward and backward trellis
self.clear_trellis()
param_dict = self.unpack_params(params)
# populate forward and backward trellis
lpx = self.weighted_alpha_recursion(self.X[0], param_dict['pi0'],
param_dict['phi'],
self.Precision,
param_dict['A'],
weights,
store_belief=True )
lpx_alt = self.weighted_beta_recursion(self.X[0], param_dict['pi0'],
param_dict['phi'],
self.Precision,
param_dict['A'],
weights,
store_belief=True)
assert np.allclose(lpx, lpx_alt) # sanity check
test_ll = []
# compute loo likelihood
ll = self.log_obs_lik(self.X[0][:, :, np.newaxis], param_dict['phi'], self.Precision)
        # compute posterior p(z_t | x_1,...t-1, t+1,...T) \forall missing t
tsteps = []
for t in self.forward_trellis.keys():
lpz_given_x = self.forward_trellis[t] + self.backward_trellis[t] - lpx
test_ll.append(logsumexp(ll[t] + lpz_given_x.ravel()))
tsteps.append(t)
# empty forward and backward trellis
self.clear_trellis()
return -np.array(test_ll)
def fit(self, weights, init_params=None, num_random_restarts=1, verbose=False, maxiter=None):
if maxiter:
options_dict = {'disp': verbose, 'gtol': 1e-10, 'maxiter': maxiter}
else:
options_dict = {'disp': verbose, 'gtol': 1e-10}
# Define a function that returns gradients of training loss using Autograd.
training_loss_fun = lambda params: self.weighted_loss(params, weights)
training_gradient_fun = grad(training_loss_fun, 0)
if init_params is None:
init_params = self.initialize_params()
if verbose:
print("Initial loss: ", training_loss_fun(init_params))
res = scipy.optimize.minimize(fun=training_loss_fun,
jac=training_gradient_fun,
x0=init_params,
tol=1e-10,
options=options_dict)
if verbose:
print('grad norm =', np.linalg.norm(res.jac))
return res.x
def clear_trellis(self):
self.forward_trellis = {}
self.backward_trellis = {}
#### Required for IJ computation ###
def compute_hessian(self, params_one, weights_one):
return autograd.hessian(self.weighted_loss, argnum=0)(params_one, weights_one)
def compute_jacobian(self, params_one, weights_one):
        return autograd.jacobian(autograd.jacobian(self.weighted_loss, argnum=0), argnum=1)\
(params_one, weights_one).squeeze()
###################################################
@staticmethod
def log_obs_lik(x, phi, Sigma):
"""
:param x: T*D*1
:param phi: 1*D*K
:param Sigma: D*D*K --- precision matrices per state
:return: ll
"""
centered_x = x - phi
ll = -0.5 * np.einsum('tdk, tdk, ddk -> tk', centered_x, centered_x, Sigma )
return ll
@staticmethod
def pack_params(params_dict):
param_list = [(np.log(params_dict['A'][:, :-1]) -
np.log(params_dict['A'][:, -1])[:, np.newaxis]).ravel(),
np.log(params_dict['pi0'][:-1]) - np.log(params_dict['pi0'][-1]),
params_dict['phi'].ravel()]
return np.concatenate(param_list)
@staticmethod
def get_prior_contrib(param_dict):
logp = 0.0
# Prior
logp += -0.5 * (np.linalg.norm(param_dict['phi'], axis=0) ** 2).sum()
logp += (1.1 - 1) * np.log(param_dict['A']).sum()
logp += (1.1 - 1) * np.log(param_dict['pi0']).sum()
return logp
@staticmethod
def get_indices_in_held_out_fold(T, pct_to_drop, contiguous=False):
"""
:param T: length of the sequence
:param pct_to_drop: % of T in the held out fold
:param contiguous: if True generate a block of indices to drop else generate indices by iid sampling
:return: o (the set of indices in the fold)
"""
if contiguous:
l = np.floor(pct_to_drop / 100. * T)
anchor = np.random.choice(np.arange(l + 1, T))
o = np.arange(anchor - l, anchor).astype(int)
else:
# i.i.d LWCV
            o = np.random.choice(T - 2, size=int(pct_to_drop / 100. * T), replace=False) + 1
return o
@staticmethod
    def synthetic_hmm_data(K, T, D, sigma0=None, seed=1234, variances_of_mean=1.0,
                           diagonal_upweight=False):
"""
:param K: Number of HMM states
:param T: length of the sequence
"""
N = 1 # For structured IJ we will remove data / time steps from a single sequence
np.random.seed(seed)
if sigma0 is None:
sigma0 = np.eye(D)
A = np.random.dirichlet(alpha=np.ones(K), size=K)
if diagonal_upweight:
A = A + 3 * np.eye(K) # add 3 to the diagonal and renormalize to encourage self transitions
            A = A / A.sum(axis=1, keepdims=True)
pi0 = np.random.dirichlet(alpha=np.ones(K))
        mus = np.random.normal(size=(K, D), scale=np.sqrt(variances_of_mean))
        zs = np.empty((N, T), dtype=int)
X = np.empty((N, T, D))
for n in range(N):
zs[n, 0] = int(np.random.choice(np.arange(K), p=pi0))
X[n, 0] = np.random.multivariate_normal(mean=mus[zs[n, 0]], cov=sigma0)
for t in range(1, T):
zs[n, t] = int(np.random.choice(np.arange(K), p=A[zs[n, t - 1], :]))
X[n, t] = np.random.multivariate_normal(mean=mus[zs[n, t]], cov=sigma0)
return {'X': X, 'state_assignments': zs, 'A': A, 'initial_state_assignment': pi0, 'means': mus}
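# End-to-end sketch (illustrative; K, T, D, pct_to_drop and the `_demo_hmm` helper
# are arbitrary assumptions): generate a synthetic sequence, fit with all emission
# weights on, then score the model at held-out timesteps by zeroing their weights.
def _demo_hmm():
    data = HMM.synthetic_hmm_data(K=2, T=50, D=2)
    hmm = HMM(data['X'], config_dict={'K': 2, 'precision': None})
    weights = np.ones(hmm.T)
    params = hmm.fit(weights, maxiter=50)
    held_out = HMM.get_indices_in_held_out_fold(hmm.T, pct_to_drop=10)
    weights[held_out] = 0.
    print(hmm.loss_at_missing_timesteps(weights, params))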
<s> from builtins import range
import autograd.numpy as np
def adam(grad, x, callback=None, num_iters=100, step_size=0.001, b1=0.9, b2=0.999, eps=10**-8, polyak=False):
"""Adapted from autograd.misc.optimizers"""
m = np.zeros(len(x))
v = np.zeros(len(x))
for i in range(num_iters):
g = grad(x, i)
if callback: callback(x, i, g, polyak)
m = (1 - b1) * g + b1 * m # First moment estimate.
v = (1 - b2) * (g**2) + b2 * v # Second moment estimate.
mhat = m / (1 - b1**(i + 1)) # Bias correction.
vhat = v / (1 - b2**(i + 1))
x = x - step_size*mhat/(np.sqrt(vhat) + eps)
    return x
<s> import matplotlib.pyplot as plt
import numpy as np
import numpy.random as npr
import torch
def make_data_gap(seed, data_count=100):
import GPy
    npr.seed(seed)
x = np.hstack([np.linspace(-5, -2, int(data_count/2)), np.linspace(2, 5, int(data_count/2))])
x = x[:, np.newaxis]
k = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.)
K = k.K(x)
L = np.linalg.cholesky(K + 1e-5 * np.eye(data_count))
# draw a noise free random function from a GP
eps = np.random.randn(data_count)
f = L @ eps
# use a homoskedastic Gaussian noise model N(f(x)_i, \\sigma^2). \\sigma^2 = 0.1
eps_noise = np.sqrt(0.1) * np.random.randn(data_count)
y = f + eps_noise
y = y[:, np.newaxis]
plt.plot(x, f, 'ko', ms=2)
plt.plot(x, y, 'ro')
plt.title("GP generated Data")
plt.pause(1)
return torch.FloatTensor(x), torch.FloatTensor(y), torch.FloatTensor(x), torch.FloatTensor(y)
def make_data_sine(seed, data_count=450):
# fix the random seed
np.random.seed(seed)
noise_var = 0.1
X = np.linspace(-4, 4, data_count)
y = 1*np.sin(X) + np.sqrt(noise_var)*npr.randn(data_count)
    train_count = int(0.2 * data_count)
    idx = npr.permutation(range(data_count))