Yuliang committed
Commit a36c88f
Parent(s): 4a4217c

try to fix

Files changed (3):
  1. app.py +36 -53
  2. apps/infer.py +2 -2
  3. lib/common/render.py +4 -4
app.py CHANGED
@@ -88,7 +88,8 @@ description = '''
 </details>
 
 <center>
-<h2> Generate pose & prompt-guided images / Upload photos / Use examples &rarr; Submit Image (~2min) &rarr; Generate Video (~2min) </h2>
+<a href="https://huggingface.co/spaces/Yuliang/ECON?duplicate=true"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-lg-dark.svg"/></a>
+<h2> Generate pose & prompt-guided images / Upload photos / Use examples &rarr; Submit Image (~3min) &rarr; Generate Video (~3min) </h2>
 </center>
 '''
 
@@ -244,16 +245,30 @@ with gr.Blocks() as demo:
             gr.Markdown(hint_prompts)
 
         with gr.Column():
-            gallery = gr.Gallery().style(grid=[2], height="auto")
+            gallery = gr.Gallery(label="Generated Images").style(grid=[2], height="auto")
             gallery_cache = gr.State()
-            inp = gr.Image(type="filepath", label="Input Image for ECON")
+
+            gr.Markdown(
+                '''
+                <center>
+                <strong>Click the target generated image for Reconstruction.</strong> <br>
+                &darr;
+                </center>
+                '''
+            )
+
+            inp = gr.Image(type="filepath", label="Input Image for Reconstruction")
             fitting_step = gr.inputs.Slider(
-                10, 100, step=10, label='Fitting steps (Slower yet Better-aligned SMPL-X)', default=default_step
+                10,
+                100,
+                step=10,
+                label='Fitting steps (Slower yet Better-aligned SMPL-X)',
+                default=default_step
            )
 
             with gr.Row():
                 btn_sample = gr.Button("Generate Image")
-                btn_submit = gr.Button("Submit Image (~2min)")
+                btn_submit = gr.Button("Submit Image (~3min)")
 
             btn_sample.click(
                 fn=generate_images,
@@ -277,59 +292,37 @@ with gr.Blocks() as demo:
                 examples=list(examples_pose),
                 inputs=[inp],
                 cache_examples=cached,
-                fn=generate_model,
-                outputs=out_lst,
+                fn=None,
+                outputs=None,
                 label="Hard Pose Examples"
             )
-
+
             gr.Examples(
                 examples=list(examples_cloth),
                 inputs=[inp],
                 cache_examples=cached,
-                fn=generate_model,
-                outputs=out_lst,
+                fn=None,
+                outputs=None,
                 label="Loose Cloth Examples"
             )
-
+
             out_vid = gr.Video(label="Shared on Twitter with #ECON")
 
         with gr.Column():
             overlap_inp = gr.Image(type="filepath", label="Image Normal Overlap").style(height=400)
-            out_final = gr.Model3D(clear_color=[0.0, 0.0, 0.0, 0.0], label="Clothed human", elem_id="avatar")
-            out_smpl = gr.Model3D(clear_color=[0.0, 0.0, 0.0, 0.0], label="SMPL-X body", elem_id="avatar")
+            out_final = gr.Model3D(
+                clear_color=[0.0, 0.0, 0.0, 0.0], label="Clothed human", elem_id="avatar"
+            )
+            out_smpl = gr.Model3D(
+                clear_color=[0.0, 0.0, 0.0, 0.0], label="SMPL-X body", elem_id="avatar"
+            )
 
-            out_final_obj = gr.State()
             vis_tensor_path = gr.State()
 
             with gr.Row():
-                btn_video = gr.Button("Generate Video (~2min)")
-
-            # with gr.Row():
-            #     btn_texture = gr.Button("Generate Full-texture")
-
-            # with gr.Row():
-            #     prompt = gr.Textbox(
-            #         label="Enter your prompt to texture the mesh",
-            #         max_lines=10,
-            #         placeholder=
-            #         "best quality, extremely detailed, solid color background, super detail, high detail, edge lighting, soft focus, light and dark contrast, 8k, high detail, edge lighting, 3d, c4d, blender, oc renderer, ultra high definition, 3d rendering",
-            #     )
-            #     seed = gr.Slider(label='Seed', minimum=0, maximum=100000, value=3, step=1)
-            #     guidance_scale = gr.Slider(
-            #         label='Guidance scale', minimum=0, maximum=50, value=7.5, step=0.1
-            #     )
-
-            # progress_text = gr.Text(label='Progress')
-
-            # with gr.Tabs():
-            #     with gr.TabItem(label='Images from each viewpoint'):
-            #         viewpoint_images = gr.Gallery(show_label=False)
-            #     with gr.TabItem(label='Result video'):
-            #         result_video = gr.Video(show_label=False)
-            #     with gr.TabItem(label='Output mesh file'):
-            #         output_file = gr.File(show_label=False)
-
-            out_lst = [out_smpl, out_final, out_final_obj, overlap_inp, vis_tensor_path]
+                btn_video = gr.Button("Generate Video (~3min)")
+
+            out_lst = [out_smpl, out_final, overlap_inp, vis_tensor_path]
 
     btn_video.click(
         fn=generate_video,
@@ -338,20 +331,10 @@ with gr.Blocks() as demo:
     )
 
     btn_submit.click(fn=generate_model, inputs=[inp, fitting_step], outputs=out_lst)
-
-    # btn_texture.click(
-    #     fn=generate_texture,
-    #     inputs=[out_final_obj, prompt, seed, guidance_scale],
-    #     outputs=[viewpoint_images, result_video, output_file, progress_text]
-    # )
-
+
     demo.load(None, None, None, _js=load_js)
 
 if __name__ == "__main__":
 
-    # demo.launch(debug=False, enable_queue=False,
-    #             auth=(os.environ['USER'], os.environ['PASSWORD']),
-    #             auth_message="Register at icon.is.tue.mpg.de to get HuggingFace username and password.")
-
     demo.queue(concurrency_count=1)
     demo.launch(debug=True, enable_queue=True)
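
Note on the app.py rewiring: Gradio assigns a callback's return values to the `outputs` list by position, so the four values that `generate_model` now returns must line up with the four components in `out_lst`. A minimal sketch of that contract, reusing the component names from this diff but with a dummy stand-in for `generate_model` (the real one lives in apps/infer.py):

import gradio as gr

# Dummy stand-in for apps/infer.py:generate_model — it only illustrates that the
# callback returns one value per component in `outputs`, in the same order.
def fake_generate_model(img_path, steps):
    smpl_glb = "smpl.glb"          # -> out_smpl        (gr.Model3D)
    refine_glb = "refine.glb"      # -> out_final       (gr.Model3D)
    overlap_png = "overlap.png"    # -> overlap_inp     (gr.Image)
    tensor_path = "in_tensor.pt"   # -> vis_tensor_path (gr.State)
    return smpl_glb, refine_glb, overlap_png, tensor_path

with gr.Blocks() as demo:
    inp = gr.Image(type="filepath", label="Input Image for Reconstruction")
    fitting_step = gr.Slider(10, 100, step=10, value=50, label="Fitting steps")
    out_smpl = gr.Model3D(label="SMPL-X body")
    out_final = gr.Model3D(label="Clothed human")
    overlap_inp = gr.Image(type="filepath", label="Image Normal Overlap")
    vis_tensor_path = gr.State()
    out_lst = [out_smpl, out_final, overlap_inp, vis_tensor_path]

    btn_submit = gr.Button("Submit Image")
    btn_submit.click(fn=fake_generate_model, inputs=[inp, fitting_step], outputs=out_lst)
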
apps/infer.py CHANGED
@@ -652,7 +652,7 @@ def generate_model(in_path, fitting_step=50):
 
     smpl_glb_path = smpl_obj_path.replace(".obj", ".glb")
     # smpl_npy_path = smpl_obj_path.replace(".obj", ".npy")
-    refine_obj_path = final_path
+    # refine_obj_path = final_path
     refine_glb_path = final_path.replace(".obj", ".glb")
     overlap_path = img_overlap_path
     vis_tensor_path = osp.join(out_dir, cfg.name, f"vid/{data['name']}_in_tensor.pt")
@@ -666,4 +666,4 @@ def generate_model(in_path, fitting_step=50):
     # gc.collect()
     # torch.cuda.empty_cache()
 
-    return [smpl_glb_path, refine_glb_path, refine_obj_path, overlap_path, vis_tensor_path]
+    return [smpl_glb_path, refine_glb_path, overlap_path, vis_tensor_path]
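
Side note on apps/infer.py: the trimmed return now carries exactly the four paths that app.py's `out_lst` expects, and the `.glb` paths are still derived from the `.obj` paths by a plain string replacement. A small sketch of that derivation with made-up paths; `os.path.splitext` is shown as an equivalent that only touches the extension, in case ".obj" ever appears earlier in a path:

import os.path as osp

# Hypothetical output paths standing in for the ones built earlier in generate_model.
smpl_obj_path = "./results/econ/obj/sample_smpl.obj"
final_path = "./results/econ/obj/sample_full.obj"

# As in the diff: swap the extension by substring replacement.
smpl_glb_path = smpl_obj_path.replace(".obj", ".glb")
refine_glb_path = final_path.replace(".obj", ".glb")

# Equivalent that rewrites only the extension, never a ".obj" occurring mid-path.
refine_glb_path_safe = osp.splitext(final_path)[0] + ".glb"
assert refine_glb_path == refine_glb_path_safe
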
lib/common/render.py CHANGED
@@ -307,14 +307,14 @@ class Render:
         height, width = data["img_raw"].shape[2:]
 
         width = int(width / (height / 256.0))
-        height = 256
+        height = int(256)
 
         fourcc = cv2.VideoWriter_fourcc(*"mp4v")
         video = cv2.VideoWriter(
             save_path,
             fourcc,
             self.fps,
-            (width * 3, int(height)),
+            (width * 3, height),
         )
 
         pbar = tqdm(range(len(self.meshes)))
@@ -355,10 +355,10 @@ class Render:
                                        data)
             img_cloth = blend_rgb_norm((torch.stack(mesh_renders)[num_obj:, cam_id] - 0.5) * 2.0,
                                        data)
-            final_img = torch.cat([img_raw, img_smpl, img_cloth], dim=-1).squeeze(0)
+            final_img = torch.cat([img_raw, img_smpl, img_cloth], dim=-1)
 
             final_img_rescale = F.interpolate(
-                final_img, size=(height, width), mode="bilinear", align_corners=False
+                final_img, size=(height, width*3), mode="bilinear", align_corners=False
             ).squeeze(0).permute(1, 2, 0).numpy().astype(np.uint8)
 
             video.write(final_img_rescale[:, :, ::-1])
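
Note on the render.py change: cv2.VideoWriter is opened with a frame size given as (width, height), while each array passed to write() must have shape (height, width, 3); frames of any other size are typically dropped without an error. With three 256-pixel-high panels (raw | SMPL | clothed) laid out side by side, the writer size is (width * 3, height) and the interpolation target is (height, width * 3), which this hunk makes consistent. A minimal sketch of that constraint, with made-up panel dimensions and file name, assuming opencv-python and numpy are installed:

import cv2
import numpy as np

# Hypothetical per-panel size after rescaling the raw image to a height of 256.
height = 256
width = 192

fourcc = cv2.VideoWriter_fourcc(*"mp4v")
# Three panels side by side, so the writer is opened with (width * 3, height).
video = cv2.VideoWriter("rotation.mp4", fourcc, 30, (width * 3, height))

# Every written frame must then have shape (height, width * 3, 3), uint8, BGR —
# the same shape F.interpolate(..., size=(height, width * 3)) yields after permute.
frame = np.zeros((height, width * 3, 3), dtype=np.uint8)
video.write(frame)
video.release()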