Update app.py
app.py CHANGED
@@ -11,7 +11,7 @@ from diffusers import AutoPipelineForImage2Image
 from diffusers.utils import make_image_grid, load_image
 import uuid
 
-base_url=f'https://omnibus-top-20-img-img.hf.space/file='
+base_url=f'https://omnibus-top-20-img-img-tint.hf.space/file='
 loaded_model=[]
 for i,model in enumerate(models):
     try:
@@ -21,8 +21,8 @@ for i,model in enumerate(models):
         pass
 print (loaded_model)
 
-pipeline = AutoPipelineForImage2Image.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None, variant="fp16", use_safetensors=True).to("cpu")
-pipeline.unet = torch.compile(pipeline.unet)
+#pipeline = AutoPipelineForImage2Image.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None, variant="fp16", use_safetensors=True).to("cpu")
+#pipeline.unet = torch.compile(pipeline.unet)
 
 grid_wide=10
 
@@ -63,6 +63,76 @@ def get_concat_v_cut(in1, in2):
 def load_model(model_drop):
     pipeline = AutoPipelineForImage2Image.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float32, use_safetensors=True)
 
+
+
+
+def run_dif_color(out_prompt,im_path,model_drop,tint,im_height,im_width):
+    p_seed=""
+    out_box=[]
+    out_html=""
+    for i,ea in enumerate(im_path.root):
+        print(f'root::{im_path.root[i]}')
+        #print(f'ea:: {ea}')
+        #print(f'impath:: {im_path.path}')
+        url = base_url+im_path.root[i].image.path
+        myimg = cv2.imread(im_path.root[i].image.path)
+        avg_color_per_row = numpy.average(myimg, axis=0)
+        avg_color = numpy.average(avg_color_per_row, axis=0)
+        #print(avg_color)
+
+        #h=color.lstrip('#')
+        #h = input('Enter hex: ').lstrip('#')
+        #print('RGB =', tuple(int(h[i:i+2], 16) for i in (0, 2, 4)))
+        #color=tuple(int(h[i:i+2], 16) for i in (0, 2, 4))
+        r,g,b= avg_color
+
+        color = (int(r),int(g),int(b))
+
+
+        print (color)
+
+        #for i,ea in enumerate(loaded_model):
+
+
+
+        #for i in range(int(cnt)):
+        rand=random.randint(1,500)
+        for i in range(rand):
+            p_seed+=" "
+        try:
+            #model=gr.load(f'models/{model[int(model_drop)]}')
+            model=loaded_model[int(model_drop)]
+            out_img=model(out_prompt+p_seed)
+            print(out_img)
+
+            raw=Image.open(out_img)
+            raw=raw.convert('RGB')
+
+            colorize = RGBTransform().mix_with(color,factor=float(tint)).applied_to(raw)
+
+            out_box.append(colorize)
+        except Exception as e:
+            print(e)
+            out_html=str(e)
+            pass
+
+        yield out_box,out_html
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
 def run_dif(prompt,im_path,model_drop,cnt,strength,guidance,infer,im_height,im_width):
     uid=uuid.uuid4()
     print(f'im_path:: {im_path}')
@@ -251,7 +321,8 @@ with gr.Blocks(css=css) as app:
             strength=gr.Slider(label="Strength",minimum=0,maximum=1,step=0.1,value=0.2)
             guidance=gr.Slider(label="Guidance",minimum=0,maximum=10,step=0.1,value=8.0)
             infer=gr.Slider(label="Inference Steps",minimum=0,maximum=50,step=1,value=10)
-
+            tint = gr.Slider(label="Tint Strength", minimum=0, maximum=1, step=0.01, value=0.30)
+
         with gr.Row():
             btn=gr.Button()
             stop_btn=gr.Button("Stop")
@@ -271,6 +342,9 @@
 
     im_list=gr.Textbox(visible=False)
     im_btn.click(load_im,inp_im,[outp,im_list,im_height,im_width])
-
+
+    go_btn=btn.click(run_dif_color,[inp,outp,model_drop,tint,im_height,im_width],[fingal,out_html])
+
+    #go_btn = btn.click(run_dif_color,[inp,outp,model_drop,cnt,strength,guidance,infer,im_height,im_width],[fin,out_html])
     stop_btn.click(None,None,None,cancels=[go_btn])
 app.queue().launch()
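Note on the change: the new run_dif_color function reads the uploaded image with OpenCV, averages its pixels to get a dominant colour, and then tints each generated image toward that colour with RGBTransform().mix_with(color, factor=tint), where the tint strength comes from the new "Tint Strength" slider. Below is a minimal standalone sketch of that tinting step for reference. It is an approximation, not the Space's code: it assumes RGBTransform behaves like a plain blend toward a solid colour and substitutes PIL's Image.blend; the average_color and apply_tint helpers are illustrative names only. One caveat worth noting: cv2.imread returns channels in BGR order, so unpacking r,g,b = avg_color directly, as the diff does, effectively swaps red and blue; the sketch reverses the order before building the RGB tuple.

    import cv2
    from PIL import Image

    def average_color(path):
        # cv2.imread returns a BGR array; average over height and width to get one colour
        img = cv2.imread(path)
        b, g, r = img.mean(axis=(0, 1))
        return (int(r), int(g), int(b))

    def apply_tint(im, color, factor):
        # blend the image toward a solid colour; factor=0.0 leaves it unchanged, 1.0 is a flat fill
        overlay = Image.new("RGB", im.size, color)
        return Image.blend(im.convert("RGB"), overlay, float(factor))

    # example: tint a generated image toward the source image's average colour
    # tinted = apply_tint(Image.open("generated.png"), average_color("source.png"), 0.30)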