AkiKagura committed on
Commit ed597da · 1 Parent(s): ff68612

Create app.py

Files changed (1)
  1. app.py +55 -0
app.py ADDED
@@ -0,0 +1,55 @@
+ import gradio as gr
+ #import torch
+ #from torch import autocast  # only needed on GPU
+
+ from PIL import Image
+ import numpy as np
+ from io import BytesIO
+ import os
+ MY_SECRET_TOKEN = os.environ.get('HF_TOKEN_SD')  # Hugging Face access token, read from the Space's secrets
+
+ #from diffusers import StableDiffusionPipeline
+ from diffusers import StableDiffusionImg2ImgPipeline
+
+ # no-op safety-checker replacement (defined here but never attached to the pipeline)
+ def empty_checker(images, **kwargs): return images, False
+ print("start generating")
+
+ YOUR_TOKEN = MY_SECRET_TOKEN
+
+ device = "cpu"
+
+ # load the img2img pipeline and keep it on the CPU
+ img_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("AkiKagura/mkgen-diffusion", use_auth_token=YOUR_TOKEN)
+ img_pipe.to(device)
+
+ # color-sketch canvas input (Gradio 3.x `source`/`tool` arguments)
+ source_img = gr.Image(source="canvas", type="filepath", tool='color-sketch', label="new gradio color sketch")
+
+ gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[2], height="auto")
+
+ def resize(value, img):
+     # resize the sketch to a value x value square (512 for Stable Diffusion)
+     #baseheight = value
+     img = Image.open(img)
+     #hpercent = (baseheight/float(img.size[1]))
+     #wsize = int((float(img.size[0])*float(hpercent)))
+     #img = img.resize((wsize,baseheight), Image.Resampling.LANCZOS)
+     img = img.resize((value, value), Image.Resampling.LANCZOS)
+     return img
+
+
+ def infer(source_img, prompt):
+     # run img2img on the drawn sketch with the given prompt
+     source_image = resize(512, source_img)
+     source_image.save('source.png')
+     images_list = img_pipe([prompt] * 1, guidance_scale=7.5, num_inference_steps=25, init_image=source_image, strength=0.75)
+     images = []
+     # older diffusers releases return a dict indexed with "sample"
+     for i, image in enumerate(images_list["sample"]):
+         images.append(image)
+     return images
+
+ print("okay")
+
+ title = "Marco Generation Painting"
+ description = "Draw and use 'mkmk woman' to get Marco pics. <br />Warning: slow process, around 10 min of inference time."
+
+ gr.Interface(fn=infer, inputs=[source_img, "text"], outputs=gallery, title=title, description=description).queue(max_size=100).launch()