# -*- coding: utf-8 -*-
"""Copy of compose_glide.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/19xx6Nu4FeiGj-TzTUFxBf-15IkeuFx_F
"""
import streamlit as st
import gradio as gr
import torch as th
from composable_diffusion.download import download_model
from composable_diffusion.model_creation import create_model_and_diffusion as create_model_and_diffusion_for_clevr
from composable_diffusion.model_creation import model_and_diffusion_defaults as model_and_diffusion_defaults_for_clevr
from torch import autocast
from composable_stable_diffusion_pipeline import ComposableStableDiffusionPipeline
# This notebook supports both CPU and GPU.
# On CPU, generating one sample may take on the order of 20 minutes.
# On a GPU, it should be under a minute.
has_cuda = th.cuda.is_available()
device = th.device('cuda' if has_cuda else 'cpu')
print(device)
# init stable diffusion model
pipe = ComposableStableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    use_auth_token=st.secrets["USER_TOKEN"]
).to(device)
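
# Disable the built-in safety checker by replacing it with a no-op;
# otherwise flagged prompts come back as black images (see the note in
# the description below).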
def dummy(images, **kwargs):
    return images, False

pipe.safety_checker = dummy
# create model for CLEVR Objects
clevr_options = model_and_diffusion_defaults_for_clevr()
flags = {
    "image_size": 128,
    "num_channels": 192,
    "num_res_blocks": 2,
    "learn_sigma": True,
    "use_scale_shift_norm": False,
    "raw_unet": True,
    "noise_schedule": "squaredcos_cap_v2",
    "rescale_learned_sigmas": False,
    "rescale_timesteps": False,
    "num_classes": '2',
    "dataset": "clevr_pos",
    "use_fp16": has_cuda,
    "timestep_respacing": '100'
}
for key, val in flags.items():
    clevr_options[key] = val
clevr_model, clevr_diffusion = create_model_and_diffusion_for_clevr(**clevr_options)
clevr_model.eval()
if has_cuda:
    clevr_model.convert_to_fp16()
clevr_model.to(device)
clevr_model.load_state_dict(th.load(download_model('clevr_pos'), map_location=device))
print('total clevr_pos parameters', sum(x.numel() for x in clevr_model.parameters()))
def compose_clevr_objects(prompt, guidance_scale, steps):
    # parse "x, y | x, y | ..." into a list of 2D object coordinates
    coordinates = [[float(x.split(',')[0].strip()), float(x.split(',')[1].strip())]
                   for x in prompt.split('|')]
    coordinates += [[-1, -1]]  # add unconditional score label
    batch_size = 1

    # rebuild only the diffusion process with the requested step count;
    # the trained model itself is reused as-is
    clevr_options['timestep_respacing'] = str(int(steps))
    _, clevr_diffusion = create_model_and_diffusion_for_clevr(**clevr_options)

    def model_fn(x_t, ts, **kwargs):
        # duplicate the single noisy image once per label
        # (all conditional labels plus the unconditional one)
        half = x_t[:1]
        combined = th.cat([half] * kwargs['y'].size(0), dim=0)
        model_out = clevr_model(combined, ts, **kwargs)
        eps, rest = model_out[:, :3], model_out[:, 3:]
        masks = kwargs.get('masks')
        cond_eps = eps[masks].mean(dim=0, keepdim=True)
        uncond_eps = eps[~masks].mean(dim=0, keepdim=True)
        half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
        eps = th.cat([half_eps] * x_t.size(0), dim=0)
        return th.cat([eps, rest], dim=1)
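
    # The update above is composed classifier-free guidance:
    #   eps = eps_uncond + guidance_scale * (mean_i eps_cond_i - eps_uncond)
    # i.e. the conditional scores for all object labels are averaged before
    # the usual extrapolation away from the unconditional score.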
    def sample(coordinates):
        masks = [True] * (len(coordinates) - 1) + [False]
        model_kwargs = dict(
            y=th.tensor(coordinates, dtype=th.float, device=device),
            masks=th.tensor(masks, dtype=th.bool, device=device)
        )
        samples = clevr_diffusion.p_sample_loop(
            model_fn,
            (len(coordinates), 3, clevr_options["image_size"], clevr_options["image_size"]),
            device=device,
            clip_denoised=True,
            progress=True,
            model_kwargs=model_kwargs,
            cond_fn=None,
        )[:batch_size]
        return samples

    samples = sample(coordinates)
    # map the sample from [-1, 1] to a uint8 HWC image
    out_img = samples[0].permute(1, 2, 0)
    out_img = (out_img + 1) / 2
    out_img = (out_img.detach().cpu() * 255.).to(th.uint8)
    out_img = out_img.numpy()
    return out_img
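
# Illustrative call (not executed here); the coordinate string mirrors
# examples_3 below:
#   compose_clevr_objects('0.1, 0.5 | 0.3, 0.5 | 0.5, 0.5 | 0.7, 0.5 | 0.9, 0.5',
#                         guidance_scale=10, steps=100)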
def stable_diffusion_compose(prompt, scale, steps, weights, seed):
    # seed the generator on whichever device is actually in use;
    # hard-coding "cuda" here crashes on CPU-only machines
    generator = th.Generator(device.type).manual_seed(int(seed))
    with autocast(device.type):
        image = pipe(prompt, guidance_scale=scale, num_inference_steps=steps,
                     weights=weights, generator=generator)["sample"][0]
        image.save(f'{"_".join(prompt.split())}.png')
    return image
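
# Illustrative call (not executed here), mirroring examples_6 below:
#   stable_diffusion_compose('a camel | arctic', scale=15, steps=50,
#                            weights='1 | 1', seed=0)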
def compose(prompt, weights, version, guidance_scale, steps, seed):
    try:
        with th.no_grad():
            if version == 'Stable_Diffusion_1v_4':
                return stable_diffusion_compose(prompt, guidance_scale, steps, weights, seed)
            else:
                return compose_clevr_objects(prompt, guidance_scale, steps)
    except Exception as e:
        print(e)
        return None
examples_1 = "A castle in a forest | grainy, fog"
examples_2 = 'A blue sky | A mountain in the horizon | Cherry Blossoms in front of the mountain'
examples_3 = '0.1, 0.5 | 0.3, 0.5 | 0.5, 0.5 | 0.7, 0.5 | 0.9, 0.5'
examples_4 = 'a photo of Obama | a photo of Biden'
examples_5 = 'a white church | lightning in the background'
examples_6 = 'a camel | arctic'
examples_7 = 'A lake | A mountain | Cherry Blossoms next to the lake'
examples = [
    [examples_1, "1 | -1", 'Stable_Diffusion_1v_4', 15, 50, 0],
    [examples_4, "1 | 1", 'Stable_Diffusion_1v_4', 15, 50, 0],
    [examples_7, "1 | 1 | 1", 'Stable_Diffusion_1v_4', 15, 50, 0],
    [examples_5, "1 | 1", 'Stable_Diffusion_1v_4', 15, 50, 0],
    [examples_6, "1 | 1", 'Stable_Diffusion_1v_4', 15, 50, 0],
    [examples_3, "1 | 1 | 1 | 1 | 1", 'CLEVR Objects', 10, 100, 0]
]
title = 'Compositional Visual Generation with Composable Diffusion Models'
description = '<p>Our conjunction and negation operators have also been added to the stable diffusion webui! (<a href="https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Negative-prompt">Negation</a> and <a href="https://github.com/AUTOMATIC1111/stable-diffusion-webui/commit/c26732fbee2a57e621ac22bf70decf7496daa4cd">Conjunction</a>)</p><p>See more information on our <a href="https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/">Project Page</a>.</p><ul><li>One version is based on the released <a href="https://github.com/openai/glide-text2im">GLIDE</a> and <a href="https://github.com/CompVis/stable-diffusion/">Stable Diffusion</a> for composing natural language descriptions.</li><li>Another is based on our pre-trained CLEVR Objects model for composing objects. <br>(<b>Note</b>: We recommend keeping <b><i>x</i></b> in the range <b><i>[0.1, 0.9]</i></b> and <b><i>y</i></b> in the range <b><i>[0.25, 0.7]</i></b>, since the training dataset labels lie within these ranges.)</li></ul><p>When composing multiple sentences, use `|` as the delimiter; see the examples below.</p><p>You can also specify the weight of each sentence, again using `|` as the delimiter. A negative weight applies the Negation operator (NOT); otherwise the Conjunction operator (AND) is used.</p><p><b>Only the Conjunction operator is enabled for CLEVR Objects.</b></p><p><b>Note: when using Stable Diffusion, black images are returned if the given prompt is detected as problematic. For composing with the GLIDE model, we recommend the Colab demo on our <a href="https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/">Project Page</a>.</b></p>'
iface = gr.Interface(compose,
                     inputs=[
                         gr.Textbox(label='prompt', value='a photo of Obama | a photo of Biden'),
                         gr.Textbox(label='weights', value='1 | 1'),
                         gr.Radio(['Stable_Diffusion_1v_4', 'CLEVR Objects'], type="value",
                                  label='version', value='Stable_Diffusion_1v_4'),
                         gr.Slider(2, 30, value=15, label='guidance_scale'),
                         gr.Slider(10, 200, value=50, label='steps'),
                         gr.Number(value=0, label='seed')
                     ],
                     outputs='image', cache_examples=False,
                     title=title, description=description, examples=examples)
iface.launch()