milyiyo committed
Commit 830135c
Parent(s): 0e3b8e8

Update app.py

Files changed (1)
  1. app.py +21 -41
app.py CHANGED
@@ -1,47 +1,27 @@
-import gradio as gr
 from diffusers import LDMTextToImagePipeline
-import torch
-import numpy as np
-import PIL
-import cv2
-
+import gradio as gr
 import PIL.Image
+import numpy as np
 import random
+import torch
 
-print('\nDEBUG: Version: 3')
-
-#pipeline = LDMTextToImagePipeline.from_pretrained("fusing/latent-diffusion-text2im-large")
 ldm_pipeline = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
 
-FOOTER = '<img id="visitor-badge" alt="visitor badge" src="https://visitor-badge.glitch.me/badge?page_id=milyiyo.testing-diffusers" />'
-
-def greet(name):
-    return "Hello " + name + "!!"
-
-def genimage(prompt, iterations):
-    image = pipeline([prompt], generator=generator, eta=0.3, guidance_scale=6.0, num_inference_steps=iterations)["sample"]
-    return image[0]
-
-#image_processed = image.cpu().permute(0, 2, 3, 1)
-#image_processed = image_processed * 255.
-#image_processed = image_processed.numpy().astype(np.uint8)
-#image_pil = PIL.Image.fromarray(image_processed[0])
-
-# save image
-#file_name = "test.png"
-#image_pil.save(file_name)
-#img = cv2.imread(file_name)
-##cv2_imshow(img)
-#return img
-
-def predict(prompt, steps=100):
-    torch.cuda.empty_cache()
-    generator = torch.manual_seed(42)
-    images = ldm_pipeline([prompt], generator=generator, num_inference_steps=steps, eta=0.3, guidance_scale=6.0)["sample"]
-    return images[0]
-
-iface = gr.Interface(
-    fn=predict,
-    inputs=["text", "number"],
-    outputs=gr.Image(shape=[256,256], type="pil", elem_id="output_image"))
-iface.launch()
+def predict(prompt, steps=100, seed=42, guidance_scale=6.0):
+    torch.cuda.empty_cache()
+    generator = torch.manual_seed(seed)
+    images = ldm_pipeline([prompt], generator=generator, num_inference_steps=steps, eta=0.3, guidance_scale=guidance_scale)["sample"]
+    return images[0]
+
+random_seed = random.randint(0, 2147483647)
+gr.Interface(
+    predict,
+    inputs=[
+        gr.inputs.Textbox(label='Prompt', default='a chalk pastel drawing of a llama wearing a wizard hat'),
+        gr.inputs.Slider(1, 100, label='Inference Steps', default=50, step=1),
+        gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1),
+        gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=6.0, step=0.1),
+    ],
+    outputs=gr.Image(shape=[256,256], type="pil", elem_id="output_image"),
+    css="#output_image{width: 256px}",
+).launch()
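
For reference, a minimal sketch (not part of the commit) of how the updated predict() could be smoke-tested outside the Gradio UI, assuming the same app.py and diffusers version as above, where the pipeline returns PIL images under the "sample" key; the output file name llama.png is only illustrative.

# Hypothetical smoke test for predict(); assumes app.py as committed above.
if __name__ == "__main__":
    image = predict(
        "a chalk pastel drawing of a llama wearing a wizard hat",
        steps=50,
        seed=123,
        guidance_scale=6.0,
    )
    # predict() returns a PIL.Image in this diffusers version, so .save() works.
    image.save("llama.png")

Note that later diffusers releases expose generated images as output.images rather than a "sample" key, and Gradio 3+ replaces gr.inputs.Textbox / gr.inputs.Slider with top-level gr.Textbox / gr.Slider components; the sketch above sticks to the APIs used in this commit.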