Omnibus committed
Commit: 91e93b7
Parent: 8055b15

Update app.py

Files changed (1)
  1. app.py +9 -140
app.py CHANGED
@@ -10,7 +10,7 @@ import torch
 from diffusers import AutoPipelineForImage2Image
 from diffusers.utils import make_image_grid, load_image

-base_url=f'https://omnibus-top-20-img-img.hf.space/file='
+base_url=f'https://omnibus-top-20-img-img-basic.hf.space/file='
 loaded_model=[]
 for i,model in enumerate(models):
     try:
@@ -26,117 +26,16 @@ def load_model(model_drop):
     pipeline = AutoPipelineForImage2Image.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float32, use_safetensors=True)

 def run_dif(prompt,im_path,model_drop,cnt,strength,guidance,infer):
-    print(f'im_path:: {im_path}')
-    print(f'im_path0:: {im_path.root[0]}')
-    print(f'im_path0.image.path:: {im_path.root[0].image.path}')
     out_box=[]
-
-    for i,ea in enumerate(im_path.root):
-        print(f'root::{im_path.root[i]}')
-        #print(f'ea:: {ea}')
-        #print(f'impath:: {im_path.path}')
-        url = base_url+im_path.root[i].image.path
+    for i in range(cnt):
+        yield out_box,f"Working on {i} of {cnt}"
+        url = base_url+im_path
         print(url)
-        #init_image = load_image(url)
         init_image=load_image(url)
-        #prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
-
-        # pass prompt and image to pipeline
         #image = pipeline(prompt, image=init_image, strength=0.8,guidance_scale=8.0,negative_prompt=negative_prompt,num_inference_steps=50).images[0]
         image = pipeline(prompt, image=init_image, strength=float(strength),guidance_scale=float(guidance),num_inference_steps=int(infer)).images[0]
-        #make_image_grid([init_image, image], rows=1, cols=2)
         out_box.append(image)
-        yield out_box,""
-
-
-
-
-def run_dif_old(out_prompt,model_drop,cnt):
-    p_seed=""
-    out_box=[]
-    out_html=""
-    #for i,ea in enumerate(loaded_model):
-    for i in range(int(cnt)):
-        p_seed+=" "
-        try:
-            model=loaded_model[int(model_drop)]
-            out_img=model(out_prompt+p_seed)
-            print(out_img)
-            out_box.append(out_img)
-        except Exception as e:
-            print(e)
-            out_html=str(e)
-            pass
-        yield out_box,out_html
-
-def run_dif_og(out_prompt,model_drop,cnt):
-    out_box=[]
-    out_html=""
-    #for i,ea in enumerate(loaded_model):
-    for i in range(cnt):
-        try:
-            #print (ea)
-            model=loaded_model[int(model_drop)]
-            out_img=model(out_prompt)
-            print(out_img)
-            url=f'https://omnibus-top-20.hf.space/file={out_img}'
-            print(url)
-            uid = uuid.uuid4()
-            #urllib.request.urlretrieve(image, 'tmp.png')
-            #out=Image.open('tmp.png')
-            r = requests.get(url, stream=True)
-
-            if r.status_code == 200:
-                img_buffer = io.BytesIO(r.content)
-                print (f'bytes:: {io.BytesIO(r.content)}')
-                str_equivalent_image = base64.b64encode(img_buffer.getvalue()).decode()
-                img_tag = "<img src='data:image/png;base64," + str_equivalent_image + "'/>"
-                out_html+=f"<div class='img_class'><a href='https://huggingface.co/models/{models[i]}'>{models[i]}</a><br>"+img_tag+"</div>"
-                out = Image.open(io.BytesIO(r.content))
-                out_box.append(out)
-            html_out = "<div class='grid_class'>"+out_html+"</div>"
-            yield out_box,html_out
-        except Exception as e:
-            out_html+=str(e)
-            html_out = "<div class='grid_class'>"+out_html+"</div>"
-
-    yield out_box,html_out
-
-def thread_dif(out_prompt,mod):
-    out_box=[]
-    out_html=""
-    #for i,ea in enumerate(loaded_model):
-    try:
-        print (ea)
-        model=loaded_model[int(mod)]
-        out_img=model(out_prompt)
-        print(out_img)
-        url=f'https://omnibus-top-20.hf.space/file={out_img}'
-        print(url)
-        uid = uuid.uuid4()
-        #urllib.request.urlretrieve(image, 'tmp.png')
-        #out=Image.open('tmp.png')
-        r = requests.get(url, stream=True)
-
-        if r.status_code == 200:
-            img_buffer = io.BytesIO(r.content)
-            print (f'bytes:: {io.BytesIO(r.content)}')
-            str_equivalent_image = base64.b64encode(img_buffer.getvalue()).decode()
-            img_tag = "<img src='data:image/png;base64," + str_equivalent_image + "'/>"
-
-            #out_html+=f"<div class='img_class'><a href='https://huggingface.co/models/{models[i]}'>{models[i]}</a><br>"+img_tag+"</div>"
-            out = Image.open(io.BytesIO(r.content))
-            out_box.append(out)
-        else:
-            out_html=r.status_code
-        html_out = "<div class='grid_class'>"+out_html+"</div>"
-        return out_box,html_out
-    except Exception as e:
-        out_html=str(e)
-        #out_html+=str(e)
-        html_out = "<div class='grid_class'>"+out_html+"</div>"
-
-    return out_box,html_out
+    yield out_box,"Complete"


 css="""
@@ -150,36 +49,6 @@ min-width:200px;

 """

-def load_im(img):
-    im_box=[]
-    im = Image.open(img)
-    width, height = im.size
-    new_w=int(width/10)
-    new_h=new_w
-    w=0
-    h=0
-    newsize=(512,512)
-    for i in range(int(height/new_h)):
-        print(i)
-        for b in range(10):
-            print(b)
-            # Setting the points for cropped image
-            left = w
-            top = h
-            right = left+new_w
-            bottom = top+new_h
-
-            # Cropped image of above dimension
-            # (It will not change original image)
-            im1 = im.crop((left, top, right, bottom))
-            im1 = im1.resize(newsize)
-
-            im_box.append(im1)
-            w+=new_w
-            #yield im_box,[]
-        h+=new_h
-        w=0
-    yield im_box,im_box
 with gr.Blocks(css=css) as app:
     with gr.Row():
         with gr.Column():
@@ -199,9 +68,9 @@ with gr.Blocks(css=css) as app:
             cnt = gr.Number(value=1)
     out_html=gr.HTML()
     outp=gr.Gallery(columns=10)
-    fingal=gr.Gallery(columns=10)
-    im_list=gr.Textbox()
-    im_btn.click(load_im,inp_im,[outp,im_list])
-    go_btn = btn.click(run_dif,[inp,outp,model_drop,cnt,strength,guidance,infer],[fingal,out_html])
+    #fingal=gr.Gallery(columns=10)
+    #im_list=gr.Textbox()
+    #im_btn.click(load_im,inp_im,[outp,im_list])
+    go_btn = btn.click(run_dif,[inp,inp_im,model_drop,cnt,strength,guidance,infer],[outp,out_html])
     stop_btn.click(None,None,None,cancels=[go_btn])
 app.queue().launch()
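
For context, the updated run_dif is an ordinary Gradio generator: it runs the SD 1.5 image-to-image pipeline cnt times over one input image and yields (gallery, status) pairs so the UI refreshes as images finish. The sketch below is a minimal standalone approximation of that flow, not the Space's exact code: it reads the input straight from a local file path instead of the Space's base_url file= lookup, and the widget names (prompt_box, in_img, status, gallery, go) are illustrative.

# Minimal sketch of the new img2img loop; widget names and the
# local-file input are assumptions, not taken from the Space itself.
import torch
import gradio as gr
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import load_image

pipeline = AutoPipelineForImage2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float32, use_safetensors=True
)

def run_dif(prompt, im_path, cnt, strength, guidance, infer):
    out_box = []
    for i in range(int(cnt)):
        # Each yield pushes the partial gallery and a status string to the UI.
        yield out_box, f"Working on {i} of {int(cnt)}"
        init_image = load_image(im_path)  # local path here; the Space builds a file= URL instead
        image = pipeline(
            prompt,
            image=init_image,
            strength=float(strength),
            guidance_scale=float(guidance),
            num_inference_steps=int(infer),
        ).images[0]
        out_box.append(image)
    yield out_box, "Complete"

with gr.Blocks() as demo:
    prompt_box = gr.Textbox(label="Prompt")
    in_img = gr.Image(type="filepath", label="Input image")
    cnt = gr.Number(value=1)
    strength = gr.Slider(0.0, 1.0, value=0.8)
    guidance = gr.Slider(0.0, 20.0, value=8.0)
    infer = gr.Slider(1, 100, value=20, step=1)
    status = gr.HTML()
    gallery = gr.Gallery(columns=10)
    go = gr.Button("Run")
    go.click(run_dif, [prompt_box, in_img, cnt, strength, guidance, infer], [gallery, status])

demo.queue().launch()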