fffiloni committed
Commit 036a8c6
1 Parent(s): f8190cb

Update app.py

Files changed (1)
  1. app.py +10 -19
app.py CHANGED
@@ -3,33 +3,24 @@ import cv2
  import numpy as np
  import torch
  from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
- from moviepy.editor import ImageSequenceClip

  pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
  pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
  pipe.enable_model_cpu_offload()

- def export_to_video(video_frames):
-     # Convert the nested list to a NumPy array and scale values to 0-255 range
-     video_frames = np.array(video_frames)
-     video_frames = (video_frames * 255).astype(np.uint8)
-
-     # Convert frames from (N, H, W, C) to list of (H, W, C)
-     frames_list = [frame for frame in video_frames]
-
-     # Create a video clip from the frames
-     fps = 24  # Frames per second
-     clip = ImageSequenceClip(frames_list, fps=fps)
-
-     # Write the video file
-     clip.write_videofile("output_video.mp4", codec="libx264")
-
-     print("Video has been created successfully.")
-     return 'output_video.mp4'
+ def export_to_video(frames: np.ndarray, fps: int) -> str:
+     frames = np.clip((frames * 255), 0, 255).astype(np.uint8)
+     out_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
+     writer = imageio.get_writer(out_file.name, format="FFMPEG", fps=fps)
+     for frame in frames:
+         writer.append_data(frame)
+     writer.close()
+     return out_file.name

  def infer(prompt):
      negative_prompt = "text, watermark, copyright, blurry, nsfw"
-     video_frames = pipe(prompt, negative_prompt=negative_prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames
+     video_frames = pipe(prompt, negative_prompt=negative_prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames[0]
+
      video_path = export_to_video(video_frames)
      print(video_path)
      return video_path
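Note that the hunk above does not show the top-of-file imports: the rewritten helper relies on tempfile and imageio (whose "FFMPEG" writer needs the imageio-ffmpeg backend), and it now takes an fps argument that the unchanged export_to_video(video_frames) call in infer() does not pass. Below is a minimal, self-contained sketch of how the new helper would look with those pieces filled in; the fps default of 24 is an assumption for illustration, not part of the commit.

import tempfile

import imageio  # the "FFMPEG" writer requires the imageio-ffmpeg backend
import numpy as np

def export_to_video(frames: np.ndarray, fps: int = 24) -> str:  # default fps is assumed, not in the commit
    # Frames are assumed to be floats in [0, 1]; scale and clip to uint8 for encoding.
    frames = np.clip(frames * 255, 0, 255).astype(np.uint8)
    # Write to a unique temporary .mp4 instead of a fixed "output_video.mp4",
    # so repeated requests do not overwrite each other's output.
    out_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
    writer = imageio.get_writer(out_file.name, format="FFMPEG", fps=fps)
    for frame in frames:  # frames has shape (num_frames, H, W, C)
        writer.append_data(frame)
    writer.close()
    return out_file.name

With a default like this, the existing export_to_video(video_frames) call keeps working; otherwise infer() would need to pass fps explicitly. The switch from .frames to .frames[0] in infer() presumably selects the frame stack of the single generated video, so the array shape matches what the exporter iterates over.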