Zhouyan248 committed on
Commit
dd310fc
1 Parent(s): e180512

Update app.py

Files changed (1)
  1. app.py +5 -12
app.py CHANGED
@@ -25,13 +25,9 @@ h1 {
 def infer(prompt, image_inp, seed_inp, ddim_steps,width,height):
     setup_seed(seed_inp)
     args.num_sampling_steps = ddim_steps
-    ### first, test the return type of Image
-    print(prompt, seed_inp, ddim_steps, type(image_inp))
     img = cv2.imread(image_inp)
-    new_size = [height,width]
-
+    new_size = [height,width]
     args.image_size = new_size
-
     vae, model, text_encoder, diffusion = model_i2v_fun(args)
     vae.to(device)
     model.to(device)
@@ -58,17 +54,14 @@ def infer(prompt, image_inp, seed_inp, ddim_steps,width,height):
     video_ = ((video_clip * 0.5 + 0.5) * 255).add_(0.5).clamp_(0, 255).to(dtype=torch.uint8).cpu().permute(0, 2, 3, 1)
     torchvision.io.write_video(os.path.join(args.save_img_path, prompt+ '.mp4'), video_, fps=8)
 
-
-
-    # video = model_i2V(prompt, image_inp, seed_inp, ddim_steps)
 
     return os.path.join(args.save_img_path, prompt+ '.mp4')
 
 
 
-def clean():
+# def clean():
     # return gr.Image.update(value=None, visible=False), gr.Video.update(value=None)
-    return gr.Video.update(value=None)
+# return gr.Video.update(value=None)
 
 
 title = """
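
For context on the write-out kept by this hunk: it rescales the sampler's float frames from [-1, 1] to [0, 255], rounds and clamps them into uint8, reorders to the (T, H, W, C) layout torchvision expects, and encodes at 8 fps. A minimal standalone sketch, where a random clip stands in for the model's video_clip output (the 16x3x256x256 shape is purely illustrative) and torchvision's PyAV-backed video writer is assumed to be installed:

import torch
import torchvision

# Stand-in for the sampler output: 16 RGB frames in (T, C, H, W), values in [-1, 1].
video_clip = torch.rand(16, 3, 256, 256) * 2 - 1
# [-1, 1] -> [0, 255], round via add_(0.5), clamp, cast to uint8, reorder to (T, H, W, C).
video_ = ((video_clip * 0.5 + 0.5) * 255).add_(0.5).clamp_(0, 255).to(dtype=torch.uint8).cpu().permute(0, 2, 3, 1)
torchvision.io.write_video('sample.mp4', video_, fps=8)
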
@@ -118,7 +111,7 @@ with gr.Blocks(css='style.css') as demo:
 
 
     submit_btn = gr.Button("Generate video")
-    clean_btn = gr.Button("Clean video")
+    # clean_btn = gr.Button("Clean video")
 
     video_out = gr.Video(label="Video result", elem_id="video-output", width = 800)
     inputs = [prompt,image_inp, seed_inp, ddim_steps,width,height]
@@ -137,7 +130,7 @@ with gr.Blocks(css='style.css') as demo:
     ex.dataset.headers = [""]
 
     # control_task.change(change_task_options, inputs=[control_task], outputs=[canny_opt, hough_opt, normal_opt], queue=False)
-    clean_btn.click(clean, inputs=[], outputs=[video_out], queue=False)
+    # clean_btn.click(clean, inputs=[], outputs=[video_out], queue=False)
     submit_btn.click(infer, inputs, outputs)
     # share_button.click(None, [], [], _js=share_js)
 
 
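The clean callback and its button are only commented out by this commit, not deleted. If it were revived, a version-tolerant variant could simply return None for the bound output, since newer Gradio releases dropped the component .update() helpers; a hypothetical sketch, not part of this commit:

# Hypothetical replacement for the commented-out clean() above: returning
# None clears the bound gr.Video component without calling gr.Video.update.
def clean():
    return None

# clean_btn.click(clean, inputs=[], outputs=[video_out], queue=False)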