BestWishYsh committed
Update app.py
app.py CHANGED
@@ -175,7 +175,17 @@ def generate(
     if rife_status:
         video_pt = rife_inference_with_latents(frame_interpolation_model, video_pt)
 
-
+    batch_size = video_pt.shape[0]
+    batch_video_frames = []
+    for batch_idx in range(batch_size):
+        pt_image = video_pt[batch_idx]
+        pt_image = torch.stack([pt_image[i] for i in range(pt_image.shape[0])])
+
+        image_np = VaeImageProcessor.pt_to_numpy(pt_image)
+        image_pil = VaeImageProcessor.numpy_to_pil(image_np)
+        batch_video_frames.append(image_pil)
+
+    return (batch_video_frames, seed)
 
 
 def convert_to_gif(video_path):
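For context, the block added to generate() turns each decoded video tensor into a list of PIL frames with diffusers' VaeImageProcessor helpers. Below is a minimal standalone sketch of that conversion pattern, assuming a float tensor of shape (batch, frames, channels, height, width) with values in [0, 1]; the random tensor is only a stand-in for the real decoded video.

import torch
from diffusers.image_processor import VaeImageProcessor

# Stand-in for a decoded video batch: 1 video, 8 frames, 3 channels, 64x64, values in [0, 1].
video_pt = torch.rand(1, 8, 3, 64, 64)

batch_video_frames = []
for batch_idx in range(video_pt.shape[0]):
    pt_image = video_pt[batch_idx]                        # (frames, C, H, W)
    image_np = VaeImageProcessor.pt_to_numpy(pt_image)    # (frames, H, W, C) numpy array
    image_pil = VaeImageProcessor.numpy_to_pil(image_np)  # list of PIL.Image frames
    batch_video_frames.append(image_pil)

print(len(batch_video_frames), len(batch_video_frames[0]))  # 1 video, 8 frames

The diff also re-stacks the frame dimension with torch.stack before conversion, which yields an equivalent (frames, C, H, W) tensor; the sketch omits that copy.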
@@ -337,7 +347,7 @@ with gr.Blocks() as demo:
         rife_status,
         progress=gr.Progress(track_tqdm=True)
     ):
-
+        batch_video_frames, seed = generate(
             prompt,
             image_input,
             num_inference_steps=50,
@@ -347,16 +357,6 @@
             rife_status=rife_status,
         )
 
-        batch_size = latents.shape[0]
-        batch_video_frames = []
-        for batch_idx in range(batch_size):
-            pt_image = latents[batch_idx]
-            pt_image = torch.stack([pt_image[i] for i in range(pt_image.shape[0])])
-
-            image_np = VaeImageProcessor.pt_to_numpy(pt_image)
-            image_pil = VaeImageProcessor.numpy_to_pil(image_np)
-            batch_video_frames.append(image_pil)
-
         video_path = save_video(batch_video_frames[0], fps=math.ceil((len(batch_video_frames[0]) - 1) / 6))
         video_update = gr.update(visible=True, value=video_path)
         gif_path = convert_to_gif(video_path)
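The save_video call in the unchanged context lines derives the playback fps from the frame count as math.ceil((len(frames) - 1) / 6), i.e. it targets a clip of roughly six seconds. A quick check of that arithmetic, using an assumed frame count of 49 that is not taken from the diff:

import math

num_frames = 49  # assumed example frame count, not from the diff
fps = math.ceil((num_frames - 1) / 6)
print(fps)  # 8 -> 49 frames played back at 8 fps lasts just over six seconds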
|