Spaces: Running on L40S

Upload app_hg.py with huggingface_hub

app_hg.py CHANGED
@@ -34,68 +34,68 @@ from PIL import Image
 from einops import rearrange
 import pandas as pd

-import sys
-import spaces
-import subprocess
-from huggingface_hub import snapshot_download
-
-def install_cuda_toolkit():
-# CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run"
-CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/12.2.0/local_installers/cuda_12.2.0_535.54.03_linux.run"
-CUDA_TOOLKIT_FILE = "/tmp/%s" % os.path.basename(CUDA_TOOLKIT_URL)
-subprocess.call(["wget", "-q", CUDA_TOOLKIT_URL, "-O", CUDA_TOOLKIT_FILE])
-subprocess.call(["chmod", "+x", CUDA_TOOLKIT_FILE])
-subprocess.call([CUDA_TOOLKIT_FILE, "--silent", "--toolkit"])
-
-os.environ["CUDA_HOME"] = "/usr/local/cuda"
-os.environ["PATH"] = "%s/bin:%s" % (os.environ["CUDA_HOME"], os.environ["PATH"])
-os.environ["LD_LIBRARY_PATH"] = "%s/lib:%s" % (
-os.environ["CUDA_HOME"],
-"" if "LD_LIBRARY_PATH" not in os.environ else os.environ["LD_LIBRARY_PATH"],
-)
-# Fix: arch_list[-1] += '+PTX'; IndexError: list index out of range
-os.environ["TORCH_CUDA_ARCH_LIST"] = "8.0;8.6"
+# import sys
+# import spaces
+# import subprocess
+# from huggingface_hub import snapshot_download
+
+# def install_cuda_toolkit():
+# # CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run"
+# CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/12.2.0/local_installers/cuda_12.2.0_535.54.03_linux.run"
+# CUDA_TOOLKIT_FILE = "/tmp/%s" % os.path.basename(CUDA_TOOLKIT_URL)
+# subprocess.call(["wget", "-q", CUDA_TOOLKIT_URL, "-O", CUDA_TOOLKIT_FILE])
+# subprocess.call(["chmod", "+x", CUDA_TOOLKIT_FILE])
+# subprocess.call([CUDA_TOOLKIT_FILE, "--silent", "--toolkit"])
+
+# os.environ["CUDA_HOME"] = "/usr/local/cuda"
+# os.environ["PATH"] = "%s/bin:%s" % (os.environ["CUDA_HOME"], os.environ["PATH"])
+# os.environ["LD_LIBRARY_PATH"] = "%s/lib:%s" % (
+# os.environ["CUDA_HOME"],
+# "" if "LD_LIBRARY_PATH" not in os.environ else os.environ["LD_LIBRARY_PATH"],
+# )
+# # Fix: arch_list[-1] += '+PTX'; IndexError: list index out of range
+# os.environ["TORCH_CUDA_ARCH_LIST"] = "8.0;8.6"

-def install_requirements():
-subprocess.check_call([sys.executable, "-m", "pip", "install", "git+https://github.com/NVlabs/nvdiffrast"])
-subprocess.check_call([sys.executable, "-m", "pip", "install", "git+https://github.com/facebookresearch/pytorch3d@stable"])
+# def install_requirements():
+# subprocess.check_call([sys.executable, "-m", "pip", "install", "git+https://github.com/NVlabs/nvdiffrast"])
+# subprocess.check_call([sys.executable, "-m", "pip", "install", "git+https://github.com/facebookresearch/pytorch3d@stable"])


-def download_models():
-os.makedirs("weights", exist_ok=True)
-os.makedirs("weights/hunyuanDiT", exist_ok=True)
-os.makedirs("third_party/weights/DUSt3R_ViTLarge_BaseDecoder_512_dpt", exist_ok=True)
-try:
-snapshot_download(
-repo_id="tencent/Hunyuan3D-1",
-local_dir="./weights",
-resume_download=True
-)
-print("Successfully downloaded Hunyuan3D-1 model")
-except Exception as e:
-print(f"Error downloading Hunyuan3D-1: {e}")
-try:
-snapshot_download(
-repo_id="Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers-Distilled",
-local_dir="./weights/hunyuanDiT",
-resume_download=True
-)
-print("Successfully downloaded HunyuanDiT model")
-except Exception as e:
-print(f"Error downloading HunyuanDiT: {e}")
-try:
-snapshot_download(
-repo_id="naver/DUSt3R_ViTLarge_BaseDecoder_512_dpt",
-local_dir="./third_party/weights/DUSt3R_ViTLarge_BaseDecoder_512_dpt",
-resume_download=True
-)
-print("Successfully downloaded DUSt3R model")
-except Exception as e:
-print(f"Error downloading DUSt3R: {e}")
+# def download_models():
+# os.makedirs("weights", exist_ok=True)
+# os.makedirs("weights/hunyuanDiT", exist_ok=True)
+# os.makedirs("third_party/weights/DUSt3R_ViTLarge_BaseDecoder_512_dpt", exist_ok=True)
+# try:
+# snapshot_download(
+# repo_id="tencent/Hunyuan3D-1",
+# local_dir="./weights",
+# resume_download=True
+# )
+# print("Successfully downloaded Hunyuan3D-1 model")
+# except Exception as e:
+# print(f"Error downloading Hunyuan3D-1: {e}")
+# try:
+# snapshot_download(
+# repo_id="Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers-Distilled",
+# local_dir="./weights/hunyuanDiT",
+# resume_download=True
+# )
+# print("Successfully downloaded HunyuanDiT model")
+# except Exception as e:
+# print(f"Error downloading HunyuanDiT: {e}")
+# try:
+# snapshot_download(
+# repo_id="naver/DUSt3R_ViTLarge_BaseDecoder_512_dpt",
+# local_dir="./third_party/weights/DUSt3R_ViTLarge_BaseDecoder_512_dpt",
+# resume_download=True
+# )
+# print("Successfully downloaded DUSt3R model")
+# except Exception as e:
+# print(f"Error downloading DUSt3R: {e}")

-# install_cuda_toolkit()
-install_requirements()
-download_models() ### download weights !!!!
+# # install_cuda_toolkit()
+# install_requirements()
+# download_models() ### download weights !!!!

 from infer import seed_everything, save_gif
 from infer import Text2Image, Removebg, Image2Views, Views2Mesh, GifRenderer
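The block commented out above previously fetched the model weights at startup with huggingface_hub. For reference only, a minimal standalone sketch of that step, reusing the same repo IDs and target folders shown in the diff (the loop structure here is illustrative, not the committed code):

```python
# Illustrative sketch, not part of the commit: fetch the checkpoints the Space
# expects, mirroring the commented-out download_models() above.
import os
from huggingface_hub import snapshot_download

WEIGHTS = [
    ("tencent/Hunyuan3D-1", "./weights"),
    ("Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers-Distilled", "./weights/hunyuanDiT"),
    ("naver/DUSt3R_ViTLarge_BaseDecoder_512_dpt",
     "./third_party/weights/DUSt3R_ViTLarge_BaseDecoder_512_dpt"),
]

def download_models():
    for repo_id, local_dir in WEIGHTS:
        os.makedirs(local_dir, exist_ok=True)
        try:
            # resume_download lets an interrupted Space restart pick up a partial download
            snapshot_download(repo_id=repo_id, local_dir=local_dir, resume_download=True)
            print(f"Downloaded {repo_id} to {local_dir}")
        except Exception as e:
            print(f"Error downloading {repo_id}: {e}")
```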
@@ -207,82 +207,93 @@ def gen_save_folder(max_size=30):
 print(f"mkdir {save_folder} suceess !!!")
 return save_folder

-
-
-
+
+# @spaces.GPU
+def textgen_pipe(text, seed=0, step=0, SEED=0, STEP=0, color='face', bake=False, render=True, max_faces=12000,
+force=False, front='auto', others=[180], align_times=3):
+save_folder = gen_save_folder()
 image = worker_t2i(text, seed, step)
-image.save(
+image.save(save_folder + '/img.png')
 img_nobg = worker_xbg(image, force=True)
-
-img_nobg
-
-
-def stage_1_xbg(image, save_folder, force_remove):
-if isinstance(image, str):
-image = Image.open(image)
-dst = save_folder + '/img_nobg.png'
-rgba = worker_xbg(image, force=force_remove)
-rgba.save(dst)
-return dst
-
-@spaces.GPU
-def stage_2_i2v(image, seed, step, save_folder):
-if isinstance(image, str):
-image = Image.open(image)
-gif_dst = save_folder + '/views.gif'
-res_img, pils = worker_i2v(image, seed, step)
-save_gif(pils, gif_dst)
+img_nobg.save(save_folder + '/img_nobg.png')
+yield img_nobg, None, None, None, None, None
+res_img, pils = worker_i2v(img_nobg, SEED, STEP)
+save_gif(pils, save_folder + '/views.gif')
 views_img, cond_img = res_img[0], res_img[1]
 img_array = np.asarray(views_img, dtype=np.uint8)
 show_img = rearrange(img_array, '(n h) (m w) c -> (n m) h w c', n=3, m=2)
-show_img = show_img[worker_i2v.order, ...]
-show_img =
-
-
-
-@spaces.GPU
-def stage_3_v23(
-views_pil,
-cond_pil,
-seed,
-save_folder,
-target_face_count = 30000,
-texture_color = 'face'
-):
-do_texture_mapping = texture_color == 'face'
+show_img = rearrange(show_img[worker_i2v.order, ...], '(n m) h w c -> (n h) (m w) c', n=2, m=3)
+show_img = Image.fromarray(show_img)
+yield img_nobg, show_img, None, None, None, None
+do_texture_mapping = color == 'face'
 worker_v23(
-
-cond_pil,
-seed = seed,
+views_img, cond_img, seed = SEED,
 save_folder = save_folder,
-target_face_count =
+target_face_count = max_faces,
 do_texture_mapping = do_texture_mapping
 )
-
-
-
-
-
-
-def stage_3p_baking(save_folder, color, bake, force, front, others, align_times):
-if color == "face" and bake:
+glb_v23 = save_folder + '/mesh.glb' if do_texture_mapping else None
+obj_v23 = save_folder + '/mesh.obj'
+obj_v23 = save_folder + '/mesh_vertex_colors.obj'
+yield img_nobg, show_img, obj_v23, glb_v23, None, None
+glb_dst = None
+if do_texture_mapping and bake:
 obj_dst = worker_baker(save_folder, force, front, others, align_times)
 glb_dst = obj_dst.replace(".obj", ".glb")
-
+yield img_nobg, show_img, obj_v23, glb_v23, glb_dst, None
+if not render:
+yield img_nobg, show_img, obj_v23, glb_v23, glb_dst, None
 else:
-
-
-
-
-
-
-
-
-
-
-
-
-
+baked_obj_list = sorted(glob(save_folder + '/view_*/bake/mesh.obj'))
+obj_dst = baked_obj_list[-1] if len(baked_obj_list)>=1 else save_folder+'/mesh.obj'
+assert os.path.exists(obj_dst), f"{obj_dst} file not found"
+gif_dst = obj_dst.replace(".obj", ".gif")
+worker_gif(obj_dst, gif_dst_path=gif_dst)
+yield img_nobg, show_img, obj_v23, glb_v23, glb_dst, gif_dst
+
+# @spaces.GPU
+def imggen_pipe(image, do_removebg=True, SEED=0, STEP=0, color='face', bake=False, render=True, max_faces=12000,
+force=False, front='auto', others=[180], align_times=3):
+save_folder = gen_save_folder()
+if isinstance(image, str):image = Image.open(image)
+img_nobg = worker_xbg(image, force=do_removebg)
+img_nobg.save(save_folder + '/img_nobg.png')
+yield img_nobg, None, None, None, None, None
+res_img, pils = worker_i2v(img_nobg, SEED, STEP)
+save_gif(pils, save_folder + '/views.gif')
+views_img, cond_img = res_img[0], res_img[1]
+img_array = np.asarray(views_img, dtype=np.uint8)
+show_img = rearrange(img_array, '(n h) (m w) c -> (n m) h w c', n=3, m=2)
+show_img = rearrange(show_img[worker_i2v.order, ...], '(n m) h w c -> (n h) (m w) c', n=2, m=3)
+show_img = Image.fromarray(show_img)
+yield img_nobg, show_img, None, None, None, None
+do_texture_mapping = color == 'face'
+worker_v23(
+views_img, cond_img, seed = SEED,
+save_folder = save_folder,
+target_face_count = max_faces,
+do_texture_mapping = do_texture_mapping
+)
+glb_v23 = save_folder + '/mesh.glb' if do_texture_mapping else None
+obj_v23 = save_folder + '/mesh.obj'
+obj_v23 = save_folder + '/mesh_vertex_colors.obj'
+yield img_nobg, show_img, obj_v23, glb_v23, None, None
+glb_dst = None
+if do_texture_mapping and bake:
+obj_dst = worker_baker(save_folder, force, front, others, align_times)
+glb_dst = obj_dst.replace(".obj", ".glb")
+yield img_nobg, show_img, obj_v23, glb_v23, glb_dst, None
+if not render:
+yield img_nobg, show_img, obj_v23, glb_v23, glb_dst, None
+else:
+baked_obj_list = sorted(glob(save_folder + '/view_*/bake/mesh.obj'))
+obj_dst = baked_obj_list[-1] if len(baked_obj_list)>=1 else save_folder+'/mesh.obj'
+assert os.path.exists(obj_dst), f"{obj_dst} file not found"
+gif_dst = obj_dst.replace(".obj", ".gif")
+worker_gif(obj_dst, gif_dst_path=gif_dst)
+yield img_nobg, show_img, obj_v23, glb_v23, glb_dst, gif_dst
+
+
 def check_image_available(image):
 if image is None:
 return "Please upload image", gr.update()
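The new textgen_pipe / imggen_pipe handlers above are Gradio generator functions: each yield sends one value per wired output component, so the UI fills in stage by stage (background removal, multi-view image, mesh, baked mesh, GIF). A minimal sketch of that pattern, with illustrative component names that are not from app_hg.py:

```python
# Illustrative sketch of a streaming Gradio handler: every yield supplies one
# value per output in the click() outputs list; None acts as a placeholder for
# results that are not ready yet.
import time
import gradio as gr

def fake_pipe(prompt):
    yield "removing background...", None            # first partial update
    time.sleep(1)
    yield "generating views...", None               # second partial update
    time.sleep(1)
    yield "done", f"final result for: {prompt}"     # final update fills both outputs

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    status = gr.Textbox(label="Status")
    result = gr.Textbox(label="Result")
    gr.Button("Generate").click(fn=fake_pipe, inputs=prompt, outputs=[status, result])

demo.queue()  # enable the event queue used for streamed/partial updates
if __name__ == "__main__":
    demo.launch()
```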
@@ -335,140 +346,15 @@ def update_mode(mode):
 with gr.Blocks() as demo:
 gr.Markdown(CONST_HEADER)
 with gr.Row(variant="panel"):
-
-###### Input region
-
 with gr.Column(scale=2):
-
-### Text iutput region
-
 with gr.Tab("Text to 3D"):
 with gr.Column():
 text = gr.TextArea('一只黑白相间的熊猫在白色背景上居中坐着,呈现出卡通风格和可爱氛围。',
 lines=3, max_lines=20, label='Input text (within 70 words)')
-
-
-
-
-value='Baking',
-interactive=True
-)
-
-with gr.Accordion("Custom settings", open=False):
-textgen_color = gr.Radio(choices=["vertex", "face"], label="Color", value="face")
-
-with gr.Row():
-textgen_render = gr.Checkbox(
-label="Do Rendering",
-value=True,
-interactive=True
-)
-textgen_bake = gr.Checkbox(
-label="Do Baking",
-value=True if BAKE_AVAILEBLE else False,
-interactive=True if BAKE_AVAILEBLE else False
-)
-
-with gr.Row():
-textgen_seed = gr.Number(value=0, label="T2I seed", precision=0, interactive=True)
-textgen_SEED = gr.Number(value=0, label="Gen seed", precision=0, interactive=True)
-
-textgen_step = gr.Slider(
-value=25,
-minimum=15,
-maximum=50,
-step=1,
-label="T2I steps",
-interactive=True
-)
-textgen_STEP = gr.Slider(
-value=50,
-minimum=20,
-maximum=80,
-step=1,
-label="Gen steps",
-interactive=True
-)
-textgen_max_faces =gr.Slider(
-value=10000,
-minimum=2000,
-maximum=60000,
-step=1000,
-label="Face number limit",
-interactive=True
-)
-
-with gr.Accordion("Baking Options", open=False):
-textgen_force_bake = gr.Checkbox(
-label="Force (Ignore the degree of matching)",
-value=False,
-interactive=True
-)
-textgen_front_baking = gr.Radio(
-choices=['input image', 'multi-view front view', 'auto'],
-label="Front view baking",
-value='auto',
-interactive=True,
-visible=True
-)
-textgen_other_views = gr.CheckboxGroup(
-choices=['60°', '120°', '180°', '240°', '300°'],
-label="Other views baking",
-value=['180°'],
-interactive=True,
-visible=True
-)
-textgen_align_times = gr.Slider(
-value=3,
-minimum=1,
-maximum=5,
-step=1,
-label="Number of alignment attempts per view",
-interactive=True
-)
-
-with gr.Row():
-textgen_submit = gr.Button("Generate", variant="primary")
-
-with gr.Row():
-gr.Examples(examples=example_ts, inputs=[text], label="Text examples", examples_per_page=10)
-
-
-textgen_mode.change(
-fn=update_mode,
-inputs=textgen_mode,
-outputs=[textgen_color, textgen_bake, textgen_max_faces, textgen_render]
-)
-textgen_color.change(
-fn=lambda x:[
-gr.update(value=(x=='face'), interactive=(x=='face'), visible=(x=='face')),
-gr.update(value=(x=='face'), interactive=(x=='face'), visible=(x=='face')),
-],
-inputs=textgen_color,
-outputs=[
-textgen_bake,
-textgen_render
-]
-)
-textgen_bake.change(
-fn= lambda x:[
-gr.update(visible=x),
-gr.update(visible=x),
-gr.update(visible=x),
-gr.update(visible=x),
-gr.update(value=10000 if x else 120000, minimum=2000, maximum=60000 if x else 300000)
-],
-inputs=textgen_bake,
-outputs=[
-textgen_front_baking,
-textgen_other_views,
-textgen_align_times,
-textgen_force_bake,
-textgen_max_faces
-]
-)
-
-### Image iutput region
+with gr.Row():
+textgen_submit = gr.Button("Generate", variant="primary")
+with gr.Row():
+gr.Examples(examples=example_ts, inputs=[text], label="Text examples", examples_per_page=10)

 with gr.Tab("Image to 3D"):
 with gr.Row():
@@ -476,130 +362,127 @@ with gr.Blocks() as demo:
 image_mode="RGBA", sources="upload", interactive=True)
 with gr.Row():
 alert_message = gr.Markdown("") # for warning
+with gr.Row():
+gr.Examples(examples=example_is, inputs=[input_image],
+label="Img examples", examples_per_page=10)
+with gr.Row():
+removebg = gr.Checkbox(
+label="Remove Background",
+value=True,
+interactive=True
+)
+imggen_submit = gr.Button("Generate", variant="primary")

-
-
-
-
+mode = gr.Radio(
+choices=['Vertex color', 'Face color', 'Baking'],
+label="Texture mode",
+value='Baking',
+interactive=True
+)
+
+with gr.Accordion("Custom settings", open=False):
+color = gr.Radio(choices=["vertex", "face"], label="Color", value="face")
+with gr.Row():
+
+render = gr.Checkbox(
+label="Do Rendering",
+value=True,
+interactive=True
+)
+bake = gr.Checkbox(
+label="Do Baking",
+value=True if BAKE_AVAILEBLE else False,
+interactive=True if BAKE_AVAILEBLE else False
+)
+
+with gr.Row():
+seed = gr.Number(value=0, label="T2I seed", precision=0, interactive=True)
+SEED = gr.Number(value=0, label="Gen seed", precision=0, interactive=True)
+
+step = gr.Slider(
+value=25,
+minimum=15,
+maximum=50,
+step=1,
+label="T2I steps",
+interactive=True
+)
+STEP = gr.Slider(
+value=50,
+minimum=20,
+maximum=80,
+step=1,
+label="Gen steps",
+interactive=True
+)
+max_faces = gr.Slider(
+value=10000,
+minimum=2000,
+maximum=60000,
+step=1000,
+label="Face number limit",
 interactive=True
 )
-
-with gr.Accordion("Custom settings", open=False):
-imggen_color = gr.Radio(choices=["vertex", "face"], label="Color", value="face")

-
-
-label="
-value=
-interactive=True
-)
-imggen_render = gr.Checkbox(
-label="Do Rendering",
-value=True,
+with gr.Accordion("Baking Options", open=False):
+force_bake = gr.Checkbox(
+label="Force (Ignore the degree of matching)",
+value=False,
 interactive=True
 )
-
-
-
-
-
-
-
-imggen_STEP = gr.Slider(
-value=50,
-minimum=20,
-maximum=80,
-step=1,
-label="Gen steps",
-interactive=True
+front_baking = gr.Radio(
+choices=['input image', 'multi-view front view', 'auto'],
+label="Front view baking",
+value='auto',
+interactive=True,
+visible=True
 )
-
-
-
-
-
-
+other_views = gr.CheckboxGroup(
+choices=['60°', '120°', '180°', '240°', '300°'],
+label="Other views baking",
+value=['180°'],
+interactive=True,
+visible=True
+)
+align_times =gr.Slider(
+value=1,
+minimum=1,
+maximum=5,
+step=1,
+label="Number of alignment attempts per view",
 interactive=True
 )

-
-
-
-
-
-)
-imggen_front_baking = gr.Radio(
-choices=['input image', 'multi-view front view', 'auto'],
-label="Front view baking",
-value='auto',
-interactive=True,
-visible=True
-)
-imggen_other_views = gr.CheckboxGroup(
-choices=['60°', '120°', '180°', '240°', '300°'],
-label="Other views baking",
-value=['180°'],
-interactive=True,
-visible=True
-)
-imggen_align_times =gr.Slider(
-value=3,
-minimum=1,
-maximum=5,
-step=1,
-label="Number of alignment attempts per view",
-interactive=True
-)
+input_image.change(
+fn=check_image_available,
+inputs=input_image,
+outputs=[alert_message, removebg]
+)

-
-
-
-
-
-
-imggen_mode.change(
-fn=update_mode,
-inputs=imggen_mode,
-outputs=[imggen_color, imggen_bake, imggen_max_faces, imggen_render]
-)
-
-imggen_color.change(
-fn=lambda x:[
-gr.update(value=(x=='face'), interactive=(x=='face'), visible=(x=='face')),
-gr.update(value=(x=='face'), interactive=(x=='face'), visible=(x=='face'))
-],
-inputs=imggen_color,
-outputs=[
-imggen_bake,
-imggen_render
-]
-)
-
-imggen_bake.change(
-fn= lambda x:[
-gr.update(visible=x),
-gr.update(visible=x),
-gr.update(visible=x),
-gr.update(visible=x),
-gr.update( value=10000 if x else 120000, minimum=2000, maximum=60000 if x else 300000)
-],
-inputs=imggen_bake,
-outputs=[
-imggen_front_baking,
-imggen_other_views,
-imggen_align_times,
-imggen_force_bake,
-imggen_max_faces
-]
-)
+mode.change(
+fn=update_mode,
+inputs=mode,
+outputs=[color, bake, max_faces, render]
+)

-
-
+color.change(
+fn=lambda x:[
+gr.update(value=(x=='face'), interactive=(x=='face'), visible=(x=='face')),
+gr.update(value=(x=='face'), interactive=(x=='face'), visible=(x=='face'))
+],
+inputs=color,
+outputs=[bake, render]
+)

-
-
-
-
+bake.change(
+fn= lambda x:[
+gr.update(visible=x), gr.update(visible=x), gr.update(visible=x), gr.update(visible=x),
+gr.update(value=10000 if x else 120000, minimum=2000, maximum=60000 if x else 300000)
+],
+inputs=bake,
+outputs=[front_baking, other_views, align_times,force_bake, max_faces]
+)
+
 gr.Markdown(CONST_NOTE)

 ###### Output region
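In the rewired settings panel above, a single control's .change event reconfigures several other components by returning one gr.update object per entry in outputs. A small sketch of that pattern, using illustrative names rather than the actual app_hg.py components:

```python
# Illustrative sketch of the gr.update fan-out used by the mode/color/bake
# .change handlers above: the lambda returns one update per output, in order.
import gradio as gr

with gr.Blocks() as demo:
    bake = gr.Checkbox(label="Do Baking", value=True)
    front = gr.Radio(["input image", "auto"], label="Front view baking", value="auto")
    faces = gr.Slider(minimum=2000, maximum=60000, value=10000, step=1000,
                      label="Face number limit")

    bake.change(
        fn=lambda x: [
            gr.update(visible=x),                                   # hide the radio when not baking
            gr.update(value=10000 if x else 120000,
                      maximum=60000 if x else 300000),              # relax the face limit otherwise
        ],
        inputs=bake,
        outputs=[front, faces],
    )
```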
@@ -607,7 +490,7 @@ with gr.Blocks() as demo:
 with gr.Column(scale=3):
 with gr.Row():
 with gr.Column(scale=2):
-
+rembg_image = gr.Image(
 label="Image without background",
 type="pil",
 image_mode="RGBA",
@@ -659,70 +542,20 @@ with gr.Blocks() as demo:
 # gradio running code
 #===============================================================

-save_folder = gr.State()
-cond_image = gr.State()
-views_image = gr.State()
-
-def handle_click(save_folder):
-if save_folder is None:
-save_folder = gen_save_folder()
-return save_folder
-
 textgen_submit.click(
-fn=
-inputs=[
-
-
-
-inputs=[text, textgen_seed, textgen_step, save_folder],
-outputs=[rem_bg_image],
-).success(
-fn=stage_2_i2v,
-inputs=[rem_bg_image, textgen_SEED, textgen_STEP, save_folder],
-outputs=[views_image, cond_image, result_image],
-).success(
-fn=stage_3_v23,
-inputs=[views_image, cond_image, textgen_SEED, save_folder, textgen_max_faces, textgen_color],
-outputs=[result_3dobj, result_3dglb_texture],
-).success(
-fn=stage_3p_baking,
-inputs=[save_folder, textgen_color, textgen_bake,
-textgen_force_bake, textgen_front_baking, textgen_other_views, textgen_align_times],
-outputs=[result_3dglb_baked],
-).success(
-fn=stage_4_gif,
-inputs=[save_folder, textgen_color, textgen_bake, textgen_render],
-outputs=[result_gif],
-).success(lambda: print('Text_to_3D Done ...\n'))
-
+fn=textgen_pipe,
+inputs=[text, seed, step, SEED, STEP, color, bake, render, max_faces, force_bake,
+front_baking, other_views, align_times],
+outputs=[rembg_image, result_image, result_3dobj, result_3dglb_texture, result_3dglb_baked, result_gif],
+)

 imggen_submit.click(
-fn=
-inputs=[
-
-
-
-
-outputs=[rem_bg_image],
-).success(
-fn=stage_2_i2v,
-inputs=[rem_bg_image, imggen_SEED, imggen_STEP, save_folder],
-outputs=[views_image, cond_image, result_image],
-).success(
-fn=stage_3_v23,
-inputs=[views_image, cond_image, imggen_SEED, save_folder, imggen_max_faces, imggen_color],
-outputs=[result_3dobj, result_3dglb_texture],
-).success(
-fn=stage_3p_baking,
-inputs=[save_folder, imggen_color, imggen_bake,
-imggen_force_bake, imggen_front_baking, imggen_other_views, imggen_align_times],
-outputs=[result_3dglb_baked],
-).success(
-fn=stage_4_gif,
-inputs=[save_folder, imggen_color, imggen_bake, imggen_render],
-outputs=[result_gif],
-).success(lambda: print('Image_to_3D Done ...\n'))
-
+fn=imggen_pipe,
+inputs=[input_image, removebg, SEED, STEP, color, bake, render, max_faces, force_bake,
+front_baking, other_views, align_times],
+outputs=[rembg_image, result_image, result_3dobj, result_3dglb_texture, result_3dglb_baked, result_gif],
+)
+
 #===============================================================
 # start gradio server
 #===============================================================
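For comparison, the deleted wiring threaded a per-session save folder through a chain of .success() handlers using gr.State; the replacement above collapses those stages into a single generator call. A rough sketch of the old shape, with placeholder handler bodies and names that are illustrative rather than the removed code:

```python
# Illustrative sketch of the removed multi-stage wiring: gr.State carries a value
# between chained events, and .success() runs only if the previous step succeeded.
import tempfile
import gradio as gr

def make_save_folder():
    return tempfile.mkdtemp()                 # stand-in for gen_save_folder()

def stage_one(folder, prompt):
    return f"stage 1 for '{prompt}' wrote into {folder}"

with gr.Blocks() as demo:
    save_folder = gr.State()                  # per-session value, not a visible component
    prompt = gr.Textbox(label="Prompt")
    log = gr.Textbox(label="Log")
    run = gr.Button("Generate")

    run.click(
        fn=make_save_folder, inputs=None, outputs=save_folder,
    ).success(
        fn=stage_one, inputs=[save_folder, prompt], outputs=log,
    )

if __name__ == "__main__":
    demo.launch()
```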
@@ -731,5 +564,6 @@ with gr.Blocks() as demo:
 CONST_SERVER = '0.0.0.0'

 demo.queue(max_size=CONST_MAX_QUEUE)
-demo.launch()
+demo.launch(server_name=CONST_SERVER, server_port=CONST_PORT)
+# demo.launch()
