yizhangliu committed bb4525d (parent: 5a166e2)
Update app.py
app.py CHANGED
@@ -71,6 +71,27 @@ def get_image_ext(img_bytes):
 def diffuser_callback(i, t, latents):
     pass
 
+def preprocess_image(image):
+    w, h = image.size
+    w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32
+    image = image.resize((w, h), resample=PIL.Image.LANCZOS)
+    image = np.array(image).astype(np.float32) / 255.0
+    image = image[None].transpose(0, 3, 1, 2)
+    image = torch.from_numpy(image)
+    return 2.0 * image - 1.0
+
+def preprocess_mask(mask):
+    mask = mask.convert("L")
+    w, h = mask.size
+    w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32
+    mask = mask.resize((w // 8, h // 8), resample=PIL.Image.NEAREST)
+    mask = np.array(mask).astype(np.float32) / 255.0
+    mask = np.tile(mask, (4, 1, 1))
+    mask = mask[None].transpose(0, 1, 2, 3) # what does this step do?
+    mask = 1 - mask # repaint white, keep black
+    mask = torch.from_numpy(mask)
+    return mask
+
 def process(init_image, mask):
     global model
     '''
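For orientation (a reviewer note, not part of the commit): preprocess_image turns a PIL image into a batched NCHW float tensor scaled to [-1, 1] after rounding the size down to a multiple of 32, and preprocess_mask downscales the mask to 1/8 resolution, tiles it across the 4 latent channels, and inverts it so that white pixels mark the region to repaint. A minimal usage sketch, assuming the two helpers above are importable from app.py and using placeholder file names:

# Sketch only: "photo.png" and "mask.png" are placeholders, not files in the Space.
# Importing app.py may also run the Space's startup code.
import PIL.Image
from app import preprocess_image, preprocess_mask

init_image = PIL.Image.open("photo.png").convert("RGB")
mask_image = PIL.Image.open("mask.png")        # white = region to repaint

image_tensor = preprocess_image(init_image)    # shape (1, 3, H, W), values in [-1, 1]
mask_tensor = preprocess_mask(mask_image)      # shape (1, 4, H // 8, W // 8); 1 keeps, 0 repaints

print(image_tensor.shape, float(image_tensor.min()), float(image_tensor.max()))
print(mask_tensor.shape)

Here H and W are the image size rounded down to multiples of 32, matching the helpers above.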
@@ -188,7 +209,8 @@ def read_content(file_path: str) -> str:
 
     return content
 
-def predict(dict, prompt=""):
+def predict(dict):
+    print(f'liuyz_0_', dict)
     init_image = dict["image"] # .convert("RGB") #.resize((512, 512))
     print(f'liuyz_1_', init_image)
     print(f'liuyz_2_', init_image.convert("RGB"))
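A note on the dict argument (an assumption about the surrounding Gradio code, which this hunk does not show): in Gradio 3.x an image input created with tool="sketch" hands the callback a dict carrying the uploaded picture under "image" and the user-drawn mask under "mask", which is why predict reads dict["image"]. A minimal sketch of that wiring:

# Sketch only: hypothetical wiring for a sketch-style image input in Gradio 3.x.
import gradio as gr

def predict(d):
    init_image = d["image"].convert("RGB")   # uploaded picture
    mask_image = d["mask"].convert("RGB")    # user-drawn mask
    return init_image, mask_image

demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(source="upload", tool="sketch", type="pil"),
    outputs=[gr.Image(), gr.Image()],
)

if __name__ == "__main__":
    demo.launch()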
@@ -202,8 +224,8 @@ def predict(dict, prompt=""):
 
 css = '''
 .container {max-width: 1150px;margin: auto;padding-top: 1.5rem}
-#image_upload{min-height:
-#image_upload [data-testid="image"], #image_upload [data-testid="image"] > div{min-height:
+#image_upload{min-height:auto}
+#image_upload [data-testid="image"], #image_upload [data-testid="image"] > div{min-height: auto}
 #mask_radio .gr-form{background:transparent; border: none}
 #word_mask{margin-top: .75em !important}
 #word_mask textarea:disabled{opacity: 0.3}
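Only the two min-height rules change here; the css string is presumably passed to the Blocks constructor so these selectors reach the rendered components (that call is not shown in this hunk). A small sketch of the usual pattern:

# Sketch only: the usual way a css string like the one above is attached to a Blocks app.
import gradio as gr

css = '''
.container {max-width: 1150px;margin: auto;padding-top: 1.5rem}
'''

with gr.Blocks(css=css) as image_blocks:
    gr.HTML("<div class='container'>styled content goes here</div>")

if __name__ == "__main__":
    image_blocks.launch()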
@@ -252,9 +274,9 @@ with image_blocks as demo:
         with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True):
             # prompt = gr.Textbox(placeholder = 'Your prompt (what you want in place of what is erased)', show_label=False, elem_id="input-text")
             btn = gr.Button("Done!").style(
-                margin=
-                rounded=(
-                full_width=
+                margin=True,
+                rounded=(True, True, True, True),
+                full_width=True,
             )
     '''
         with gr.Column():
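The closing triple quote at line 281 suggests this Row/Button block sits inside a string literal, so the restyled button appears to be commented out in this revision. For reference, a standalone sketch using the same Gradio 3.x .style() keywords that the commit fills in (the surrounding layout is assumed, not copied from app.py):

# Sketch only: a Gradio 3.x button using the same .style() keywords as the diff.
import gradio as gr

with gr.Blocks() as demo:
    with gr.Row(elem_id="prompt-container"):
        btn = gr.Button("Done!").style(
            margin=True,
            rounded=(True, True, True, True),  # round all four corners
            full_width=True,
        )

if __name__ == "__main__":
    demo.launch()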