Spaces:
Runtime error
Create app.py
app.py
ADDED
from diffusers import StableDiffusionXLPipeline
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from PIL import Image, ImageOps
import gradio as gr

# Load the SDXL base model in half precision from its fp16 safetensors weights.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
)
pipe.to("cuda")
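
# Note: the fp16 weights and `pipe.to("cuda")` above assume GPU hardware; on a
# CPU-only Space the move to CUDA raises at startup, which is consistent with
# the "Runtime error" status shown for this Space.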

@torch.no_grad()
def call(
    pipe, prompt, prompt2, height=None, width=None, num_inference_steps=50,
    denoising_end=None, guidance_scale=5.0, guidance_scale2=5.0,
    negative_prompt=None, negative_prompt2=None, num_images_per_prompt=1,
    eta=0.0, generator=None, latents=None, prompt_embeds=None,
    negative_prompt_embeds=None, pooled_prompt_embeds=None,
    negative_pooled_prompt_embeds=None, output_type="pil", return_dict=True,
    callback=None, callback_steps=1, cross_attention_kwargs=None,
    guidance_rescale=0.0, original_size=None, crops_coords_top_left=(0, 0),
    target_size=None, negative_original_size=None,
    negative_crops_coords_top_left=(0, 0), negative_target_size=None):
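    """Denoise with two prompts at once: even steps apply `prompt` to the
    latents as-is, odd steps apply `prompt2` to vertically flipped latents, so
    the finished image reads as a different scene when turned upside down."""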
    height = height or pipe.default_sample_size * pipe.vae_scale_factor
    width = width or pipe.default_sample_size * pipe.vae_scale_factor
    original_size = original_size or (height, width)
    target_size = target_size or (height, width)
    pipe.check_inputs(prompt, None, height, width, callback_steps, negative_prompt, None, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds)
    batch_size = 1 if isinstance(prompt, str) else len(prompt) if isinstance(prompt, list) else prompt_embeds.shape[0]
    device = pipe._execution_device
    do_classifier_free_guidance = guidance_scale > 1.0
    text_encoder_lora_scale = cross_attention_kwargs.get("scale", None) if cross_attention_kwargs else None
    # encode_prompt's second positional parameter is `prompt_2`, so everything
    # after the prompt is passed by keyword.
    prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds = pipe.encode_prompt(prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, lora_scale=text_encoder_lora_scale)
    prompt2_embeds, negative_prompt2_embeds, pooled_prompt2_embeds, negative_pooled_prompt2_embeds = pipe.encode_prompt(prompt2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt2, lora_scale=text_encoder_lora_scale)
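    # The calls above yield per-token embeddings plus pooled embeddings, which
    # SDXL routes through its added-conditioning path; the negative variants are
    # only populated when classifier-free guidance is enabled.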
    pipe.scheduler.set_timesteps(num_inference_steps, device=device)
    timesteps = pipe.scheduler.timesteps
    num_channels_latents = pipe.unet.config.in_channels
    latents = pipe.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents)
    extra_step_kwargs = pipe.prepare_extra_step_kwargs(generator, eta)
    add_text_embeds, add_text2_embeds = pooled_prompt_embeds, pooled_prompt2_embeds
    add_time_ids = pipe._get_add_time_ids(original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype)
    add_time2_ids = pipe._get_add_time_ids(original_size, crops_coords_top_left, target_size, dtype=prompt2_embeds.dtype)
    negative_add_time_ids = pipe._get_add_time_ids(negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype) if negative_original_size and negative_target_size else add_time_ids
    if do_classifier_free_guidance:
        prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
        add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
        add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
        prompt2_embeds = torch.cat([negative_prompt2_embeds, prompt2_embeds], dim=0)
        add_text2_embeds = torch.cat([negative_pooled_prompt2_embeds, add_text2_embeds], dim=0)
        add_time2_ids = torch.cat([negative_add_time_ids, add_time2_ids], dim=0)
    prompt_embeds, add_text_embeds = prompt_embeds.to(device), add_text_embeds.to(device)
    add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
    prompt2_embeds, add_text2_embeds = prompt2_embeds.to(device), add_text2_embeds.to(device)
    add_time2_ids = add_time2_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
    num_warmup_steps = max(len(timesteps) - num_inference_steps * pipe.scheduler.order, 0)
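    # With classifier-free guidance, the negative (unconditional) embeddings sit
    # ahead of the conditional ones, so each UNet call below evaluates both
    # halves in a single batched forward pass.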
    if denoising_end and isinstance(denoising_end, float) and 0 < denoising_end < 1:
        discrete_timestep_cutoff = int(round(pipe.scheduler.config.num_train_timesteps - (denoising_end * pipe.scheduler.config.num_train_timesteps)))
        num_inference_steps = len([ts for ts in timesteps if ts >= discrete_timestep_cutoff])
        timesteps = timesteps[:num_inference_steps]
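    # When denoising_end is set, the timesteps are truncated so the loop below
    # stops early and returns partially denoised latents (the pattern used when
    # handing off to the SDXL refiner).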
    with pipe.progress_bar(total=num_inference_steps) as progress_bar:
        for i, t in enumerate(timesteps):
            if i % 2 == 0:
                # Even steps: denoise the latents as-is under the first prompt.
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = pipe.scheduler.scale_model_input(latent_model_input, t)
                noise_pred = pipe.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs={"text_embeds": add_text_embeds, "time_ids": add_time_ids})[0]
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            else:
                # Odd steps: flip the latents along the height axis, denoise them
                # under the second prompt, then flip the prediction back so it
                # applies to the unflipped latents.
                latent_model_input2 = torch.cat([latents.flip(2)] * 2) if do_classifier_free_guidance else latents.flip(2)
                latent_model_input2 = pipe.scheduler.scale_model_input(latent_model_input2, t)
                noise_pred2 = pipe.unet(latent_model_input2, t, encoder_hidden_states=prompt2_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs={"text_embeds": add_text2_embeds, "time_ids": add_time2_ids})[0]
                if do_classifier_free_guidance:
                    noise_pred2_uncond, noise_pred2_text = noise_pred2.chunk(2)
                    noise_pred2 = noise_pred2_uncond + guidance_scale2 * (noise_pred2_text - noise_pred2_uncond)
                noise_pred = noise_pred2.flip(2)
            latents = pipe.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
            if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % pipe.scheduler.order == 0):
                progress_bar.update()
    # Decode with the VAE in float32; the fp16 SDXL VAE is prone to numerical
    # overflow, which shows up as black images.
    pipe.vae.to(dtype=torch.float32)
    image = pipe.vae.decode(latents.to(torch.float32) / pipe.vae.config.scaling_factor, return_dict=False)[0]
    image = pipe.image_processor.postprocess(image, output_type=output_type)
    if not return_dict:
        return (image,)
    return StableDiffusionXLPipelineOutput(images=image)

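# A minimal sketch of invoking `call` directly (hypothetical prompts; assumes a
# CUDA GPU with enough memory for SDXL):
#
#   out = call(pipe, "a snowy mountain village", "a portrait of an old man",
#              height=768, width=768, guidance_scale=7.5, guidance_scale2=7.5)
#   out.images[0].save("illusion.png")
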
def simple_call(prompt1, prompt2, guidance_scale1, guidance_scale2, negative_prompt1, negative_prompt2):
    # A fixed seed keeps runs reproducible for identical inputs.
    generator = [torch.Generator(device="cuda").manual_seed(5)]
    res = call(pipe, prompt1, prompt2, width=768, height=768, num_images_per_prompt=1, num_inference_steps=50, guidance_scale=guidance_scale1, guidance_scale2=guidance_scale2, negative_prompt=negative_prompt1, negative_prompt2=negative_prompt2, generator=generator)
    image1 = res.images[0]
    # The second output is the same image rotated 180 degrees, revealing the
    # scene described by the second prompt.
    image2 = ImageOps.exif_transpose(image1.rotate(180, resample=0))
    return image1, image2

with gr.Blocks() as app:
    gr.Markdown(
        '''
        <center><h1>Upside Down Diffusion</h1>
        Placeholder
        </center>
        '''
    )

    with gr.Row():
        with gr.Column():
            prompt1 = gr.Textbox(label="Prompt 1")
            prompt2 = gr.Textbox(label="Prompt 2")
            negative_prompt1 = gr.Textbox(label="Negative Prompt 1")
            negative_prompt2 = gr.Textbox(label="Negative Prompt 2")
            # Start the sliders at a moderate guidance strength; at values
            # <= 1.0, `call` disables classifier-free guidance entirely.
            guidance_scale1 = gr.Slider(minimum=0, maximum=10, value=7.5, step=0.1, label="Guidance Scale 1")
            guidance_scale2 = gr.Slider(minimum=0, maximum=10, value=7.5, step=0.1, label="Guidance Scale 2")
            run_btn = gr.Button("Run")

            with gr.Accordion(label="Advanced Options", open=False):
                # You can place additional sliders or options here
                pass

        with gr.Column():
            result_image1 = gr.Image(label="Output 1")
            result_image2 = gr.Image(label="Output 2 (Rotated)")

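    # Gradio passes the component values to simple_call in the order listed in
    # `inputs` and maps the returned (image1, image2) tuple onto `outputs`.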
    run_btn.click(
        simple_call,
        inputs=[prompt1, prompt2, guidance_scale1, guidance_scale2, negative_prompt1, negative_prompt2],
        outputs=[result_image1, result_image2]
    )

app.queue(max_size=20)
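# queue() enables request queuing; max_size=20 caps how many submissions can
# wait at once, and further submissions are rejected until slots free up.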

if __name__ == "__main__":
    app.launch(debug=True)