Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -1,47 +1,27 @@
-import gradio as gr
 from diffusers import LDMTextToImagePipeline
-import
-import numpy as np
-import PIL
-import cv2
-
+import gradio as gr
 import PIL.Image
+import numpy as np
 import random
+import torch
 
-print('\nDEBUG: Version: 3')
-
-#pipeline = LDMTextToImagePipeline.from_pretrained("fusing/latent-diffusion-text2im-large")
 ldm_pipeline = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-##cv2_imshow(img)
-#return img
-
-def predict(prompt, steps=100):
-    torch.cuda.empty_cache()
-    generator = torch.manual_seed(42)
-    images = ldm_pipeline([prompt], generator=generator, num_inference_steps=steps, eta=0.3, guidance_scale=6.0)["sample"]
-    return images[0]
-
-iface = gr.Interface(
-    fn=predict,
-    inputs=["text", "number"],
-    outputs=gr.Image(shape=[256,256], type="pil", elem_id="output_image"))
-iface.launch()
+def predict(prompt, steps=100, seed=42, guidance_scale=6.0):
+    torch.cuda.empty_cache()
+    generator = torch.manual_seed(seed)
+    images = ldm_pipeline([prompt], generator=generator, num_inference_steps=steps, eta=0.3, guidance_scale=guidance_scale)["sample"]
+    return images[0]
+
+random_seed = random.randint(0, 2147483647)
+gr.Interface(
+    predict,
+    inputs=[
+        gr.inputs.Textbox(label='Prompt', default='a chalk pastel drawing of a llama wearing a wizard hat'),
+        gr.inputs.Slider(1, 100, label='Inference Steps', default=50, step=1),
+        gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1),
+        gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=6.0, step=0.1),
+    ],
+    outputs=gr.Image(shape=[256,256], type="pil", elem_id="output_image"),
+    css="#output_image{width: 256px}",
+).launch()
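The Space header above reports a runtime error. One plausible cause (an assumption, not something the diff confirms) is a library-version mismatch: the committed code indexes the pipeline output with ["sample"] and builds inputs from the legacy gr.inputs.* namespace with default=, both of which were removed in later diffusers and Gradio releases. A minimal sketch of the same app against those newer APIs, keeping the commit's checkpoint, prompt, and slider ranges:

import random

import gradio as gr
import torch
from diffusers import LDMTextToImagePipeline

# Same checkpoint as in the commit above.
ldm_pipeline = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")


def predict(prompt, steps=100, seed=42, guidance_scale=6.0):
    torch.cuda.empty_cache()
    generator = torch.manual_seed(int(seed))
    # Newer diffusers pipelines return an output object with an `.images`
    # list instead of a dict indexed by "sample".
    result = ldm_pipeline(
        [prompt],
        generator=generator,
        num_inference_steps=int(steps),
        eta=0.3,
        guidance_scale=guidance_scale,
    )
    return result.images[0]


random_seed = random.randint(0, 2147483647)
gr.Interface(
    predict,
    inputs=[
        # Top-level components with value= replace gr.inputs.* with default=.
        gr.Textbox(label="Prompt", value="a chalk pastel drawing of a llama wearing a wizard hat"),
        gr.Slider(1, 100, label="Inference Steps", value=50, step=1),
        gr.Slider(0, 2147483647, label="Seed", value=random_seed, step=1),
        gr.Slider(1.0, 20.0, label="Guidance Scale - how much the prompt will influence the results", value=6.0, step=0.1),
    ],
    outputs=gr.Image(type="pil", elem_id="output_image"),
    css="#output_image{width: 256px}",
).launch()

If the Space pins the older library versions that were current when this commit was made, the file shown in the diff is self-consistent as written; the sketch only covers the API renames.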