Commit c4f95ed · Parent(s): f270f46
debug 4
app.py
CHANGED
@@ -1,63 +1,42 @@
 import gradio as gr
 from diffusers import DiffusionPipeline
 import torch
-# for the more complex generation variant
 from diffusers import DDPMScheduler, UNet2DModel
 from PIL import Image
 import numpy as np
 
 
-##############################################
-# Helper functions
-##############################################
-
-#######################################
-# Generate an image from the entered prompt - via pipeline
 def erzeuge(prompt):
-    return pipeline(prompt).images #[0]
+    return pipeline(prompt).images # [0]
+
 
-
-# Generate an image - not via the pipeline, but with more configuration options
-def erzeuge_komplex(prompt):
+def erzeuge_komplex(prompt):
     scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
     model = UNet2DModel.from_pretrained("google/ddpm-cat-256").to("cuda")
     scheduler.set_timesteps(50)
-
+
     sample_size = model.config.sample_size
     noise = torch.randn((1, 3, sample_size, sample_size)).to("cuda")
     input = noise
-
+
     for t in scheduler.timesteps:
         with torch.no_grad():
             noisy_residual = model(input, t).sample
         prev_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample
         input = prev_noisy_sample
-
+
     image = (input / 2 + 0.5).clamp(0, 1)
     image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
     image = Image.fromarray((image * 255).round().astype("uint8"))
     return image
 
-######################################################
-# Load models
-#######################################
-# Alternative generation
-#gr.Interface.load("models/stabilityai/stable-diffusion-2").launch()
 
-
-
-pipeline = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2")
-pipeline.to("cuda")
+# pipeline = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2")
+pipeline = DiffusionPipeline.from_pretrained("google/ddpm-cat-256")
 
-
-# Alternative: generate an image - not via the pipeline, but with more configuration options
-#scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
-#model = UNet2DModel.from_pretrained("google/ddpm-cat-256").to("cuda")
+pipeline.to("cuda")
 
 
-######################################################
-# Build the Gradio UI
-######################################################
 with gr.Blocks() as demo:
     with gr.Column(variant="panel"):
         with gr.Row(variant="compact"):
@@ -79,4 +58,4 @@ with gr.Blocks() as demo:
     text.submit(erzeuge, inputs=[text], outputs=[gallery])
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
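The substantive change in app.py is the checkpoint swap: pipeline now loads google/ddpm-cat-256 instead of stabilityai/stable-diffusion-2. That checkpoint resolves to an unconditional DDPMPipeline, which samples cat images from pure noise and takes no text prompt, so the pipeline(prompt) call inside erzeuge is unlikely to work against it. A minimal sketch of how this checkpoint is normally sampled, assuming a recent diffusers release (the step count and output file name are illustrative, not from the commit):

# Sketch, not part of the commit: unconditional sampling with
# google/ddpm-cat-256. The pipeline accepts no prompt; passing a
# string as erzeuge() does would fail for this checkpoint.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("google/ddpm-cat-256")
pipe.to("cuda" if torch.cuda.is_available() else "cpu")

# 50 denoising steps is an illustrative choice, matching erzeuge_komplex.
image = pipe(num_inference_steps=50).images[0]
image.save("cat.png")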
app1.py
CHANGED
@@ -7,19 +7,21 @@ import random
 pipeline = DDPMPipeline.from_pretrained("google/ddpm-cat-256")
 # pipeline.to("cuda")
 
+
 def predict(steps, seed):
     generator = torch.manual_seed(seed)
-    for i in range(1,steps):
+    for i in range(1, steps):
         yield pipeline(generator=generator, num_inference_steps=i).images[0]
 
+
 random_seed = random.randint(0, 2147483647)
 gr.Interface(
     predict,
     inputs=[
-        gr.inputs.Slider(1, 100, label=…
-        gr.inputs.Slider(0, 2147483647, label=…
+        gr.inputs.Slider(1, 100, label="Inference Steps", default=5, step=1),
+        gr.inputs.Slider(0, 2147483647, label="Seed", default=random_seed, step=1),
     ],
-    outputs=gr.Image(shape=[128,128], type="pil", elem_id="output_image"),
+    outputs=gr.Image(shape=[128, 128], type="pil", elem_id="output_image"),
     css="#output_image{width: 256px}",
     title="Unconditional butterflies",
     description="图片生成器",
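Side note on app1.py: gr.inputs.Slider and the shape= argument to gr.Image belong to the Gradio 2.x-era API, which later releases deprecated and then removed. A hedged sketch of the same interface written against the newer components API follows (the Gradio version is an assumption, and this sketch is not part of the commit):

# Sketch assuming Gradio >= 3.x, where gr.inputs.* became top-level
# components and default= became value=.
import random

import gradio as gr
import torch
from diffusers import DDPMPipeline

pipeline = DDPMPipeline.from_pretrained("google/ddpm-cat-256")

def predict(steps, seed):
    generator = torch.manual_seed(seed)
    # Yielding streams each intermediate image to the UI; every
    # iteration re-samples with one more denoising step.
    for i in range(1, steps):
        yield pipeline(generator=generator, num_inference_steps=i).images[0]

random_seed = random.randint(0, 2147483647)
gr.Interface(
    predict,
    inputs=[
        gr.Slider(1, 100, value=5, step=1, label="Inference Steps"),
        gr.Slider(0, 2147483647, value=random_seed, step=1, label="Seed"),
    ],
    outputs=gr.Image(type="pil", elem_id="output_image"),
    css="#output_image{width: 256px}",
    title="Unconditional butterflies",
    description="图片生成器",  # "image generator"
).launch()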