yizhangliu committed
Commit 31e7adc
1 Parent(s): 98d0e5b
Update app.py

app.py CHANGED
@@ -223,7 +223,7 @@ def model_process(input): #image, mask):
     # {'image': '/tmp/tmp8mn9xw93.png', 'mask': '/tmp/tmpn5ars4te.png'}
     # input = request.files
     # RGB
-    origin_image_bytes = input["image"].read()
+    origin_image_bytes = open(input["image"],'rb').read()
     print(f'origin_image_bytes = ', type(origin_image_bytes), len(origin_image_bytes))
 
     image, alpha_channel = load_img(origin_image_bytes)
@@ -274,7 +274,7 @@ def model_process(input): #image, mask):
     logger.info(f"Resized image shape: {image.shape} / {type(image)}")
     print(f"Resized image shape: {image.shape} / {image[250][250]}")
 
-    mask, _ = load_img(input["mask"].read(), gray=True)
+    mask, _ = load_img(open(input["mask"],'rb').read(), gray=True)
     mask = resize_max_size(mask, size_limit=size_limit, interpolation=interpolation)
     print(f"mask image shape: {mask.shape} / {type(mask)} / {mask[250][250]}")
 
@@ -339,8 +339,8 @@ def read_content(file_path: str) -> str:
 
     return content
 
-def predict(dict):
-    print(f'liuyz_0_', dict)
+def predict(input):
+    print(f'liuyz_0_', input)
 
     '''
     image = dict["image"] # .convert("RGB") #.resize((512, 512))
@@ -351,7 +351,7 @@ def predict(dict):
     # mask = dict["mask"] # .convert("RGB") #.resize((512, 512))
     '''
 
-    output = model_process(dict["image"], dict["mask"])
+    output = model_process(input) # dict["image"], dict["mask"])
     # output = mask #output.images[0]
     # output = pipe(prompt = prompt, image=init_image, mask_image=mask,guidance_scale=7.5)
 
@@ -447,7 +447,7 @@ with image_blocks as demo:
         with gr.Box():
             with gr.Row():
                 with gr.Column():
-                    image = gr.Image(source='upload', tool='sketch', type='
+                    image = gr.Image(source='upload', tool='sketch', type='filepath',label="Upload").style(height=512)
                     with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True):
                         # prompt = gr.Textbox(placeholder = 'Your prompt (what you want in place of what is erased)', show_label=False, elem_id="input-text")
                         btn = gr.Button("Done!").style(
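
The last hunk is what drives the other changes: the sketch-enabled gr.Image component is switched to type='filepath', so Gradio hands predict a dict of temp-file paths (as the comment at the top of model_process shows) instead of in-memory images. A minimal sketch of that wiring under the Gradio 3.x API this app uses; the output component and the click handler are illustrative, since they sit outside the diff:

# Minimal sketch (Gradio 3.x API): with tool='sketch' and type='filepath',
# the component delivers {'image': <path>, 'mask': <path>} to the callback.
# The output component and event wiring are illustrative, not from app.py.
import gradio as gr

def predict(input):
    # input is e.g. {'image': '/tmp/....png', 'mask': '/tmp/....png'}
    origin_image_bytes = open(input["image"], 'rb').read()
    mask_bytes = open(input["mask"], 'rb').read()
    print(len(origin_image_bytes), len(mask_bytes))
    return input["image"]  # placeholder: echo the uploaded image path back

with gr.Blocks() as demo:
    image = gr.Image(source='upload', tool='sketch', type='filepath', label="Upload")
    btn = gr.Button("Done!")
    result = gr.Image(label="Result")
    btn.click(fn=predict, inputs=[image], outputs=[result])

if __name__ == "__main__":
    demo.launch()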
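
The first two hunks make the same change twice: input["image"] and input["mask"] are no longer file-like objects that can be .read() directly, but filesystem paths, so the bytes are loaded with open(..., 'rb'). A sketch of the distinction; the helper name is illustrative, not part of app.py:

# Illustrative helper: accept either the old file-like objects or the new
# temp-file paths that Gradio passes when the component uses type='filepath'.
def read_image_bytes(source):
    if isinstance(source, str):
        # type='filepath': source is a path such as '/tmp/tmp8mn9xw93.png'
        with open(source, 'rb') as f:
            return f.read()
    # previous behaviour: source is a file-like object exposing .read()
    return source.read()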
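
load_img is defined elsewhere in app.py and is not shown in this diff. Judging only from how it is called here (raw bytes in, an (image, alpha_channel) pair out, with an optional gray=True for the mask), a stand-in might look roughly like the following; this is an assumption for illustration, not the repository's implementation:

# Assumed stand-in for app.py's load_img, inferred from its call sites above.
import io

import numpy as np
from PIL import Image

def load_img(img_bytes, gray=False):
    image = Image.open(io.BytesIO(img_bytes))
    if gray:
        # mask path: single-channel image, no alpha channel returned
        return np.array(image.convert('L')), None
    alpha_channel = None
    if image.mode == 'RGBA':
        # keep the alpha channel separately so it can be re-applied later
        alpha_channel = np.array(image)[:, :, -1]
    return np.array(image.convert('RGB')), alpha_channel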