Update app.py
app.py CHANGED
@@ -291,30 +291,21 @@ def generate_image(prompt, negative, face_emb, face_image, face_kps, image_stren
     print('Prompt processing took: ', elapsed_time, 'seconds')
     print("Processing image...")
     st = time.time()
-  [15 removed lines (old 294-308) not captured in this view]
-        inputs["image_embeds"] = face_emb
-        inputs["control_image"] = [face_kps, image_zoe.resize((height, width))]
-        inputs["controlnet_conditioning_scale"] = [face_strength, depth_control_scale]
-    else:
-        inputs["control_image"] = [image_zoe.resize((height, width))]
-        inputs["controlnet_conditioning_scale"] = [depth_control_scale]
-
-    image = pipe(**inputs).images[0]
-
+    image = pipe(
+        prompt_embeds=conditioning,
+        pooled_prompt_embeds=pooled,
+        negative_prompt_embeds=negative_conditioning,
+        negative_pooled_prompt_embeds=negative_pooled,
+        width=1024,
+        height=1024,
+        image_embeds=face_emb,
+        image=face_image,
+        strength=1-image_strength,
+        control_image=images,
+        num_inference_steps=36,
+        guidance_scale = guidance_scale,
+        controlnet_conditioning_scale=[face_strength, depth_control_scale],
+    ).images[0]
     et = time.time()
     elapsed_time = et - st
     print('Image processing took: ', elapsed_time, 'seconds')
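The first hunk drops the removed pattern of building an inputs dict (with an else: branch for the no-face case) and expanding it via pipe(**inputs), in favour of a single pipe(...) call that passes every argument explicitly, including image_embeds and both ControlNet conditioning scales; the no-face branch becomes unnecessary because the second hunk now raises an error when no face is detected. Below is a minimal, hypothetical sketch of the two calling styles; pipe_stub and the placeholder values are illustrative stand-ins, not the Space's actual pipeline.

# Not part of the commit: a hypothetical sketch of the two calling styles the
# first hunk swaps between. pipe_stub stands in for the diffusers pipeline so
# the example runs without loading a model.
def pipe_stub(**kwargs):
    # Echo whatever keyword arguments the "pipeline" receives.
    return kwargs

face_emb = [0.1, 0.2]            # placeholder face embedding
conditioning = "prompt-embeds"   # placeholder prompt conditioning

# Removed style: assemble a kwargs dict, branching on whether a face was found,
# then expand it with **.
inputs = {"prompt_embeds": conditioning, "width": 1024, "height": 1024}
if face_emb is not None:
    inputs["image_embeds"] = face_emb
old_call = pipe_stub(**inputs)

# Added style: one explicit call that always passes the face embedding.
new_call = pipe_stub(prompt_embeds=conditioning, width=1024, height=1024,
                     image_embeds=face_emb)

assert old_call == new_call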
@@ -326,20 +317,14 @@ def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_stre
     custom_lora_path = custom_lora[0] if custom_lora else None
     selected_state_index = selected_state.index if selected_state else -1
     st = time.time()
-
+    face_image = center_crop_image_as_square(face_image)
     try:
         face_info = app.get(cv2.cvtColor(np.array(face_image), cv2.COLOR_RGB2BGR))
-        face_info = sorted(face_info, key=lambda x:(x['bbox'][2]-x['bbox'][0])*x['bbox'][3]-x['bbox'][1])[-1]
-    except:
-        face_info = None
-
-    if face_info:
+        face_info = sorted(face_info, key=lambda x:(x['bbox'][2]-x['bbox'][0])*x['bbox'][3]-x['bbox'][1])[-1] # only use the maximum face
         face_emb = face_info['embedding']
         face_kps = draw_kps(face_image, face_info['kps'])
-  [2 removed lines (old 339-340) not captured in this view]
-        face_kps = Image.new("RGB", face_image.size, (0, 0, 0)) # empty placeholder for consistency
-
+    except:
+        raise gr.Error("No face found in your image. Only face images work here. Try again")
     et = time.time()
     elapsed_time = et - st
     print('Cropping and calculating face embeds took: ', elapsed_time, 'seconds')
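The second hunk center-crops the upload to a square before running insightface detection, keeps only the largest detected face, and raises gr.Error instead of silently substituting an empty keypoint image when no face is found. center_crop_image_as_square is called here but not defined in this diff; below is a plausible PIL-based sketch of such a helper, plus an area-based largest-face pick for comparison with the committed sort key (which, by operator precedence, evaluates to (x2 - x1) * y2 - y1 rather than the box area). Both functions are assumptions, not the Space's actual implementation.

# Not part of the commit: hypothetical sketches of the helpers the new code leans on.
from PIL import Image


def center_crop_image_as_square(img: Image.Image) -> Image.Image:
    # Assumed behaviour: crop the largest centered square out of the input image.
    side = min(img.size)
    left = (img.width - side) // 2
    top = (img.height - side) // 2
    return img.crop((left, top, left + side, top + side))


def largest_face(face_info: list) -> dict:
    # Pick the detection with the largest bounding-box area, assuming
    # insightface-style dicts where 'bbox' is [x1, y1, x2, y2].
    return max(face_info, key=lambda f: (f['bbox'][2] - f['bbox'][0]) * (f['bbox'][3] - f['bbox'][1]))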