Spaces: Running on Zero

bugfix

- app.py +47 -26
- models/pipelines.py +2 -2
app.py
CHANGED
@@ -317,6 +317,12 @@ def generate_tracking_cotracker(video_tensor, density=30):
 @spaces.GPU(duration=240)
 def apply_tracking_unified(video_tensor, tracking_tensor, repaint_img_tensor, prompt, fps):
     """Unified apply-tracking function"""
+    print("--- Entering apply_tracking_unified ---")  # added print statement
+    print(f"Prompt received: {prompt}")  # added print statement
+    print(f"FPS received: {fps}")  # added print statement
+    print(f"Video tensor shape: {video_tensor.shape if video_tensor is not None else None}")  # added print statement
+    print(f"Tracking tensor shape: {tracking_tensor.shape if tracking_tensor is not None else None}")  # added print statement
+    print(f"Repaint tensor shape: {repaint_img_tensor.shape if repaint_img_tensor is not None else None}")  # added print statement
     try:
         if video_tensor is None or tracking_tensor is None:
             return None

@@ -328,7 +334,8 @@ def apply_tracking_unified(video_tensor, tracking_tensor, repaint_img_tensor, prompt, fps):
             tracking_tensor=tracking_tensor,
             img_cond_tensor=repaint_img_tensor,
             prompt=prompt,
-            checkpoint_path=DEFAULT_MODEL_PATH
+            checkpoint_path=DEFAULT_MODEL_PATH,
+            num_inference_steps=15
         )

         print(f"Generated video path: {output_path}")
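Taken together, these two hunks give the unified handler entry-point logging, a None guard, and an explicit num_inference_steps forwarded to the pipeline. A condensed sketch of the resulting function is below, for orientation only: the pipeline instance (written `das` here) and the exact shape of the surrounding call are assumptions, since only fragments of the call appear in the hunks, and the value 15 presumably keeps generation inside the @spaces.GPU(duration=240) window.

# Condensed sketch of apply_tracking_unified after this change (not a verbatim copy).
# DEFAULT_MODEL_PATH is defined elsewhere in app.py; `das` is an assumed name for the
# DiffusionAsShaderPipeline instance.
def apply_tracking_unified(video_tensor, tracking_tensor, repaint_img_tensor, prompt, fps):
    if video_tensor is None or tracking_tensor is None:
        return None
    output_path = das.apply_tracking(
        video_tensor=video_tensor,
        fps=fps,
        tracking_tensor=tracking_tensor,
        img_cond_tensor=repaint_img_tensor,
        prompt=prompt,
        checkpoint_path=DEFAULT_MODEL_PATH,
        num_inference_steps=15,  # new keyword, forwarded to models/pipelines.py
    )
    return output_path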
@@ -464,27 +471,17 @@ def load_examples():
 with gr.Blocks(title="Diffusion as Shader") as demo:
     gr.Markdown("# Diffusion as Shader Web UI")
     gr.Markdown("### [Project Page](https://igl-hkust.github.io/das/) | [GitHub](https://github.com/IGL-HKUST/DiffusionAsShader)")
-
+
     # Create hidden state variables to store intermediate results
     video_tensor_state = gr.State(None)
     tracking_tensor_state = gr.State(None)
     repaint_img_tensor_state = gr.State(None)
     fps_state = gr.State(None)
-
+
     with gr.Row():
         left_column = gr.Column(scale=1)
         right_column = gr.Column(scale=1)

-    with right_column:
-
-        gr.Markdown("### 4. Generate Tracking Video")
-        gr.Markdown("'Generate Tracking Video' is used to preserve all motion from the Source. You need to generate tracking video before producing the final result.")
-        mt_run_btn = gr.Button("Generate Tracking", variant="primary", size="lg")
-        tracking_video = gr.Video(label="Tracking Video")
-
-        apply_tracking_btn = gr.Button("5. Generate Video", variant="primary", size="lg", interactive=False)
-        output_video = gr.Video(label="Generated Video")
-
     with left_column:
         gr.Markdown("### 1. Upload Source")
         gr.Markdown("Upload a video or image, We will extract the motion and space structure from it")

@@ -531,25 +528,49 @@ with gr.Blocks(title="Diffusion as Shader") as demo:
             outputs=[mt_repaint_preview]
         )

-        mt_run_btn.click(
-            fn=process_motion_transfer,
-            inputs=[
-                source_upload, common_prompt,
-                mt_repaint_option, mt_repaint_upload
-            ],
-            outputs=[tracking_video, video_tensor_state, tracking_tensor_state, repaint_img_tensor_state, fps_state]
-        ).then(
-            fn=enable_apply_button,
-            inputs=[tracking_video],
-            outputs=[apply_tracking_btn]
-        )
-
         with gr.TabItem("Camera Control"):
             gr.Markdown("Camera Control is not available in Huggingface Space, please deploy our [GitHub project](https://github.com/IGL-HKUST/DiffusionAsShader) on your own machine")

         with gr.TabItem("Object Manipulation"):
             gr.Markdown("Object Manipulation is not available in Huggingface Space, please deploy our [GitHub project](https://github.com/IGL-HKUST/DiffusionAsShader) on your own machine")

+    with right_column:
+
+        gr.Markdown("### 4. Generate Tracking Video")
+        gr.Markdown("'Generate Tracking Video' is used to preserve all motion from the Source. You need to generate tracking video before producing the final result.")
+        mt_run_btn = gr.Button("Generate Tracking", variant="primary", size="lg")
+        tracking_video = gr.Video(label="Tracking Video")
+
+        apply_tracking_btn = gr.Button("5. Generate Video", variant="primary", size="lg", interactive=False)
+        output_video = gr.Video(label="Generated Video")
+
+        # click event definition for mt_run_btn
+        mt_run_btn.click(
+            fn=process_motion_transfer,
+            inputs=[
+                source_upload, common_prompt,
+                mt_repaint_option, mt_repaint_upload
+            ],
+            outputs=[tracking_video, video_tensor_state, tracking_tensor_state, repaint_img_tensor_state, fps_state]
+        ).then(
+            fn=enable_apply_button,
+            inputs=[tracking_video],
+            outputs=[apply_tracking_btn]
+        )
+
+        # click event definition for apply_tracking_btn
+        apply_tracking_btn.click(
+            fn=apply_tracking_unified,
+            inputs=[
+                video_tensor_state,
+                tracking_tensor_state,
+                repaint_img_tensor_state,
+                common_prompt,  # common_prompt is now available
+                fps_state
+            ],
+            outputs=[output_video]
+        )
+
     examples_list = load_examples()
     gr.Markdown("### Examples (For Workflow Demo Only)")
     gr.Markdown("The following examples are only for demonstrating DaS's workflow and output quality. If you want to actually generate tracking or videos, the program will not run unless you manually upload files from your devices.")
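The apparent point of this reorganization, flagged by the in-diff comment "common_prompt is now available": in a Gradio Blocks app, a .click() call can only reference component objects that already exist as Python variables, so the right-column widgets and all of their event wiring are moved to the end of the layout, after the left-column components (including common_prompt) have been created. A minimal, self-contained sketch of that pattern follows; it uses placeholder names, not the app's own components or handlers.

import gradio as gr

def run(prompt):
    # stand-in for process_motion_transfer / apply_tracking_unified
    return f"ran with: {prompt}"

with gr.Blocks() as demo:
    with gr.Row():
        left = gr.Column(scale=1)
        right = gr.Column(scale=1)

    with left:
        prompt_box = gr.Textbox(label="Prompt")  # must exist before any event references it

    with right:
        run_btn = gr.Button("Run")
        result = gr.Textbox(label="Result")
        # wire events last, once every referenced component has been created
        run_btn.click(fn=run, inputs=[prompt_box], outputs=[result])

# demo.launch()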
models/pipelines.py
CHANGED
@@ -570,7 +570,7 @@ class DiffusionAsShaderPipeline:
         return tracking_path, tracking_video


-    def apply_tracking(self, video_tensor, fps=8, tracking_tensor=None, img_cond_tensor=None, prompt=None, checkpoint_path=None):
+    def apply_tracking(self, video_tensor, fps=8, tracking_tensor=None, img_cond_tensor=None, prompt=None, checkpoint_path=None, num_inference_steps=15):
         """Generate final video with motion transfer

         Args:

@@ -595,7 +595,7 @@ class DiffusionAsShaderPipeline:
             tracking_tensor=tracking_tensor,
             image_tensor=img_cond_tensor,
             output_path=final_output,
-            num_inference_steps=
+            num_inference_steps=num_inference_steps,
             guidance_scale=6.0,
             dtype=torch.bfloat16,
             fps=self.fps
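Net effect of the two-file change: the number of denoising steps becomes a keyword of apply_tracking (defaulting to 15, so existing callers keep working) and is forwarded to the inner generation call instead of the previous right-hand side, which is cut off in the rendered diff above. A generic, runnable illustration of this forward-a-knob-with-a-default pattern; names and bodies are placeholders, not the repository's code.

# Generic illustration only, not DaS code.
def apply_tracking(video, *, num_inference_steps=15, **kwargs):
    # expose the knob with a default so old call sites stay valid, then forward it
    return generate(video, num_inference_steps=num_inference_steps, **kwargs)

def generate(video, *, num_inference_steps, guidance_scale=6.0):
    # stand-in for the real diffusion call; just reports what it received
    return f"steps={num_inference_steps}, cfg={guidance_scale}"

print(apply_tracking("clip.mp4"))                          # steps=15, cfg=6.0 (default)
print(apply_tracking("clip.mp4", num_inference_steps=25))  # caller override, as app.py now does with 15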