import argparse

from . import gaussian_diffusion as gd
from .respace import SpacedDiffusion, space_timesteps

# from .unet import SuperResModel

NUM_CLASSES = 1000


def model_and_diffusion_defaults():
    """
    Defaults for model and diffusion training.
    """
    return dict(
        image_size=64,
        num_channels=128,
        num_res_blocks=2,
        num_heads=4,
        num_heads_upsample=-1,
        attention_resolutions="16,8",
        dropout=0.0,
        learn_sigma=False,
        class_cond=False,
        diffusion_steps=1000,
        noise_schedule="linear",
        timestep_respacing="",
        use_kl=False,
        predict_xstart=False,
        rescale_timesteps=True,
        rescale_learned_sigmas=True,
        use_checkpoint=False,
        use_scale_shift_norm=True,
        model_arch="trans-unet",
        in_channel=8,
        out_channel=8,
        training_mode="emb",
        vocab_size=66,
        config_name="QizhiPei/biot5-base-text2mol",
        experiment_mode="lm",
        logits_mode=1,
    )
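

# Illustrative sketch (an assumption, not one of the original entry points):
# the defaults above are meant to be overridden per run, and a plain
# dict.update is enough. ``_example_text_diffusion_config`` is a hypothetical
# helper; the override values are taken from options referenced elsewhere in
# this file ("sqrt" schedule, 2000 steps, "e2e" training mode).
def _example_text_diffusion_config():
    cfg = model_and_diffusion_defaults()
    cfg.update(
        diffusion_steps=2000,   # matches the step count hard-coded further down
        noise_schedule="sqrt",  # schedule name used by create_gaussian_diffusion
        training_mode="e2e",    # end-to-end training instead of "emb"
    )
    return cfg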


# def sr_model_and_diffusion_defaults():
#     res = model_and_diffusion_defaults()
#     res["large_size"] = 256
#     res["small_size"] = 64
#     arg_names = inspect.getfullargspec(sr_create_model_and_diffusion)[0]
#     for k in res.copy().keys():
#         if k not in arg_names:
#             del res[k]
#     return res


# def sr_create_model_and_diffusion(
#     large_size,
#     small_size,
#     class_cond,
#     learn_sigma,
#     num_channels,
#     num_res_blocks,
#     num_heads,
#     num_heads_upsample,
#     attention_resolutions,
#     dropout,
#     diffusion_steps,
#     noise_schedule,
#     timestep_respacing,
#     use_kl,
#     predict_xstart,
#     rescale_timesteps,
#     rescale_learned_sigmas,
#     use_checkpoint,
#     use_scale_shift_norm,
# ):
#     model = sr_create_model(
#         large_size,
#         small_size,
#         num_channels,
#         num_res_blocks,
#         learn_sigma=learn_sigma,
#         class_cond=class_cond,
#         use_checkpoint=use_checkpoint,
#         attention_resolutions=attention_resolutions,
#         num_heads=num_heads,
#         num_heads_upsample=num_heads_upsample,
#         use_scale_shift_norm=use_scale_shift_norm,
#         dropout=dropout,
#     )
#     diffusion = create_gaussian_diffusion(
#         steps=diffusion_steps,
#         learn_sigma=learn_sigma,
#         noise_schedule=noise_schedule,
#         use_kl=use_kl,
#         predict_xstart=predict_xstart,
#         rescale_timesteps=rescale_timesteps,
#         rescale_learned_sigmas=rescale_learned_sigmas,
#         timestep_respacing=timestep_respacing,
#     )
#     return model, diffusion


# def sr_create_model(
#     large_size,
#     small_size,
#     num_channels,
#     num_res_blocks,
#     learn_sigma,
#     class_cond,
#     use_checkpoint,
#     attention_resolutions,
#     num_heads,
#     num_heads_upsample,
#     use_scale_shift_norm,
#     dropout,
# ):
#     _ = small_size  # hack to prevent unused variable
#     if large_size == 256:
#         channel_mult = (1, 1, 2, 2, 4, 4)
#     elif large_size == 64:
#         channel_mult = (1, 2, 3, 4)
#     else:
#         raise ValueError(f"unsupported large size: {large_size}")
#     attention_ds = []
#     for res in attention_resolutions.split(","):
#         attention_ds.append(large_size // int(res))
#     return SuperResModel(
#         in_channels=3,
#         model_channels=num_channels,
#         out_channels=(3 if not learn_sigma else 6),
#         num_res_blocks=num_res_blocks,
#         attention_resolutions=tuple(attention_ds),
#         dropout=dropout,
#         channel_mult=channel_mult,
#         num_classes=(NUM_CLASSES if class_cond else None),
#         use_checkpoint=use_checkpoint,
#         num_heads=num_heads,
#         num_heads_upsample=num_heads_upsample,
#         use_scale_shift_norm=use_scale_shift_norm,
#     )


def create_gaussian_diffusion(
    *,
    steps=1000,
    learn_sigma=False,
    noise_schedule="linear",  # sqrt
    use_kl=False,
    predict_xstart=False,  # True
    rescale_timesteps=False,  # True
    rescale_learned_sigmas=False,  # True
    timestep_respacing="",
    model_arch="conv-unet",  # transformer
    training_mode="emb",  # e2e
):
    # NOTE: apart from learn_sigma, the keyword arguments above are currently
    # ignored; the diffusion is hard-coded to 2000 steps with a "sqrt" beta
    # schedule, x0 (START_X) prediction, an end-to-end MSE loss, and the
    # "transformer" / "e2e" settings.
    return SpacedDiffusion(
        use_timesteps=space_timesteps(2000, [2000]),
        betas=gd.get_named_beta_schedule("sqrt", 2000),
        model_mean_type=gd.ModelMeanType.START_X,
        model_var_type=(
            gd.ModelVarType.FIXED_LARGE
            if not learn_sigma
            else gd.ModelVarType.LEARNED_RANGE
        ),
        loss_type=gd.LossType.E2E_MSE,
        rescale_timesteps=True,
        model_arch="transformer",
        training_mode="e2e",
    )
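

# Illustrative sketch (an assumption): because of the hard-coded values noted
# above, the two calls below should yield equivalent diffusion processes; only
# learn_sigma actually changes the resulting object (via the variance type).
def _example_build_diffusion():
    diffusion_a = create_gaussian_diffusion()
    diffusion_b = create_gaussian_diffusion(steps=4000, noise_schedule="linear")
    return diffusion_a, diffusion_b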


def add_dict_to_argparser(parser, default_dict):
    """Add one --<key> flag per entry of default_dict, typed from its default."""
    for k, v in default_dict.items():
        v_type = type(v)
        if v is None:
            v_type = str
        elif isinstance(v, bool):
            v_type = str2bool
        parser.add_argument(f"--{k}", default=v, type=v_type)


def args_to_dict(args, keys):
    """Collect the given keys from a parsed argparse namespace into a dict."""
    return {k: getattr(args, k) for k in keys}


def str2bool(v):
    """
    Parse a boolean value from a command-line string.

    https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
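

# Minimal usage sketch (an assumption, not part of the original training
# scripts): if this file is executed as a module (``python -m ...`` so the
# relative imports resolve), wire the defaults into an ArgumentParser via
# add_dict_to_argparser, parse an empty argv, and recover the values with
# args_to_dict.
if __name__ == "__main__":
    demo_defaults = model_and_diffusion_defaults()
    demo_parser = argparse.ArgumentParser()
    add_dict_to_argparser(demo_parser, demo_defaults)
    demo_args = demo_parser.parse_args([])  # e.g. ["--learn_sigma", "true"] to override
    print(args_to_dict(demo_args, demo_defaults.keys()))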