Commit c60af9e · Parent(s): 4944e3d
debug 9
app.py CHANGED
@@ -1,95 +1,67 @@
-# def erzeuge(prompt):
-#     return pipeline(prompt).images # [0]
-
-# model = UNet2DModel.from_pretrained("google/ddpm-cat-256").to("cuda")
-# scheduler.set_timesteps(50)
-
-# sample_size = model.config.sample_size
-# noise = torch.randn((1, 3, sample_size, sample_size)).to("cuda")
-# input = noise
-
-#         input = prev_noisy_sample
-
-#     return image
-
-# # pipeline.to("cuda")
-
-#
-# with gr.Column(variant="panel"):
-#     with gr.Row(variant="compact"):
-#         text = gr.Textbox(
-#             label="Deine Beschreibung:",
-#             show_label=False,
-#             max_lines=1,
-#             placeholder="Bildbeschreibung",
-#         ).scale(
-#             container=False,
-#         )
-#         btn = gr.Button("erzeuge Bild").style(full_width=False, min_width=100)
-
-#     gallery = gr.Gallery(
-#         label="Erzeugtes Bild", show_label=False, elem_id="gallery"
-#     ).style(columns=[2], rows=[2], object_fit="contain", height="auto")
-
-from diffusers import DiffusionPipeline
-from diffusers import DDPMPipeline, DDIMPipeline, PNDMPipeline
-import torch
-import gradio as gr
-import random
-
-pipeline = DiffusionPipeline.from_pretrained("google/ddpm-celebahq-256")
-# pipeline.to("cuda")
-
-def predict(steps, seed):
-    generator = torch.manual_seed(seed)
-    for i in range(1,steps):
-        yield pipeline(generator=generator, num_inference_steps=i).images[0]
-
-random_seed = random.randint(0, 2147483647)
-gr.Interface(
-    predict,
-    inputs=[
-        gr.inputs.Slider(1, 100, label='Inference Steps', default=5, step=1),
-        gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1),
-    ],
-    outputs=gr.Image(shape=[128,128], type="pil", elem_id="output_image"),
-    css="#output_image{width: 256px}",
-    title="Unconditional butterflies",
-    description="图片生成器",
-).queue().launch()
+import gradio as gr
+from diffusers import DiffusionPipeline
+import torch
+from diffusers import DDPMScheduler, UNet2DModel
+from PIL import Image
+import numpy as np
+
+
+def erzeuge(prompt):
+    return pipeline(prompt).images # [0]
+
+
+def erzeuge_komplex(prompt):
+    scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
+    model = UNet2DModel.from_pretrained("google/ddpm-cat-256").to("cuda")
+    scheduler.set_timesteps(50)
+
+    sample_size = model.config.sample_size
+    noise = torch.randn((1, 3, sample_size, sample_size)).to("cuda")
+    input = noise
+
+    for t in scheduler.timesteps:
+        with torch.no_grad():
+            noisy_residual = model(input, t).sample
+        prev_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample
+        input = prev_noisy_sample
+
+    image = (input / 2 + 0.5).clamp(0, 1)
+    image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
+    image = Image.fromarray((image * 255).round().astype("uint8"))
+    return image
+
+
+# pipeline = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2")
+# pipeline = DiffusionPipeline.from_pretrained("google/ddpm-cat-256")
+pipeline = DiffusionPipeline.from_pretrained("google/ddpm-celebahq-256")
+
+# pipeline.to("cuda")
+
+
+with gr.Blocks() as demo:
+    with gr.Column(variant="panel"):
+        with gr.Row(variant="compact"):
+            text = gr.Textbox(
+                label="Deine Beschreibung:",
+                show_label=False,
+                max_lines=1,
+                placeholder="Bildbeschreibung",
+            )
+            btn = gr.Button("erzeuge Bild").style(full_width=False, min_width=100)
+
+        gallery = gr.Gallery(
+            label="Erzeugtes Bild", show_label=False, elem_id="gallery"
+        ).style(columns=[2], rows=[2], object_fit="contain", height="auto")
+
+    btn.click(erzeuge, inputs=[text], outputs=[gallery])
+    text.submit(erzeuge, inputs=[text], outputs=[gallery])
+
+if __name__ == "__main__":
+    demo.launch()
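Note (not part of the commit): "google/ddpm-celebahq-256" loads as an unconditional DDPMPipeline, whose call signature takes no text prompt, so `erzeuge(prompt)` as wired to the button and textbox will most likely raise at runtime. A minimal sketch of how the unconditional pipeline is usually invoked instead; the function name `erzeuge_unconditional` and the fixed seed are illustrative assumptions, not code from this repo:

# Sketch only: unconditional sampling with DDPMPipeline (no prompt argument).
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("google/ddpm-celebahq-256")

def erzeuge_unconditional(prompt=None):
    # prompt is ignored: the CelebA-HQ DDPM model is unconditional
    generator = torch.manual_seed(0)  # fixed seed, illustrative assumption
    result = pipeline(batch_size=1, generator=generator, num_inference_steps=50)
    return result.images  # list of PIL images, which gr.Gallery can display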
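A second aside, also outside the commit: the `.style(...)` calls on gr.Button and gr.Gallery are Gradio 3.x API and were removed in Gradio 4, so this layout only works if the Space pins an older gradio version. A hedged sketch of the equivalent layout, assuming Gradio 4.x where those settings move into the constructors:

# Sketch assuming Gradio 4.x, where component .style() no longer exists.
import gradio as gr

with gr.Blocks() as demo:
    with gr.Column(variant="panel"):
        with gr.Row(variant="compact"):
            text = gr.Textbox(show_label=False, max_lines=1, placeholder="Bildbeschreibung")
            btn = gr.Button("erzeuge Bild", min_width=100)
        gallery = gr.Gallery(
            show_label=False, elem_id="gallery",
            columns=2, rows=2, object_fit="contain", height="auto",
        )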