fffiloni committed on
Commit
f4bff68
1 Parent(s): 1e4c1fe

new video export

Browse files
Files changed (1) hide show
  1. app.py +26 -2
app.py CHANGED
@@ -1,16 +1,40 @@
1
  import gradio as gr
 
 
2
  import torch
3
  from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
4
- from diffusers.utils import export_to_video
5
 
6
  pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
7
  pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
8
  pipe.enable_model_cpu_offload()
9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  def infer(prompt):
11
  negative_prompt = "text, watermark, copyright, blurry, nsfw"
12
  video_frames = pipe(prompt, negative_prompt=negative_prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames
13
- print(f"VIDEO FRAMES: {video_frames}")
14
  video_path = export_to_video(video_frames)
15
  print(video_path)
16
  return video_path
 
1
  import gradio as gr
2
+ import cv2
3
+ import numpy as np
4
  import torch
5
  from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
 
6
 
7
  pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
8
  pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
9
  pipe.enable_model_cpu_offload()
10
 
11
def export_to_video(video_frames, video_path='output_video.mp4', fps=24):
    """Encode a sequence of RGB frames into an .mp4 file with OpenCV.

    Parameters
    ----------
    video_frames : array-like
        Frames as floats (assumed in [0, 1] — TODO confirm against the
        diffusers pipeline output) or uint8, shaped (frames, H, W, C) or
        (batch, frames, H, W, C).
    video_path : str
        Output file path; default matches the original hard-coded name.
    fps : int
        Frames per second of the output video; default matches the
        original hard-coded 24.

    Returns
    -------
    str
        Path of the written video file.
    """
    frames = np.asarray(video_frames)

    # Batched pipelines return a leading batch axis; drop it so the
    # per-frame loop below iterates over frames, not over batches.
    if frames.ndim == 5:
        frames = frames[0]

    # Scale float frames to 0-255; leave uint8 untouched so already
    # quantized data is not overflowed by an unconditional * 255.
    if frames.dtype != np.uint8:
        frames = np.clip(frames * 255, 0, 255).astype(np.uint8)

    # Original code unpacked shape[2:] into three names, which raises
    # ValueError for 4-D (frames, H, W, C) input; index explicitly.
    height, width = frames.shape[1:3]

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # Codec for .mp4 files
    video_writer = cv2.VideoWriter(video_path, fourcc, fps, (width, height))
    try:
        for frame in frames:
            # OpenCV expects BGR channel order; pipeline frames are RGB.
            video_writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
    finally:
        # Always release so a partially written file is flushed/closed
        # even if a write fails mid-loop.
        video_writer.release()

    print("Video has been created successfully.")
    return video_path
35
def infer(prompt):
    """Generate a short video clip for *prompt* and return its file path."""
    unwanted = "text, watermark, copyright, blurry, nsfw"
    result = pipe(
        prompt,
        negative_prompt=unwanted,
        num_inference_steps=40,
        height=320,
        width=576,
        num_frames=24,
    )
    # Encode the generated frames to disk and hand the path back to Gradio.
    video_path = export_to_video(result.frames)
    print(video_path)
    return video_path