ZeeAI1 committed
Commit ddcf2d4 · verified · Parent: dccc97e

Update app.py

Files changed (1)
  1. app.py +5 -12
app.py CHANGED
@@ -11,9 +11,9 @@ st.title("🚀 Text-to-Video (Zeroscope)")
 def load_model():
     pipe = DiffusionPipeline.from_pretrained(
         "cerspense/zeroscope_v2_576w",
-        torch_dtype=torch.float16
+        torch_dtype=torch.float32
     )
-    pipe.enable_cpu_offload()
+    pipe.to("cpu")
     return pipe
 
 pipe = load_model()
@@ -22,16 +22,9 @@ prompt = st.text_area("Enter prompt (short & descriptive):", max_chars=50)
 
 if st.button("Generate Video"):
     if prompt:
-        with st.spinner("Generating... (this may take ~2-3 mins)"):
-            video_frames = pipe(prompt, num_frames=10, height=320, width=576).frames
+        with st.spinner("Generating... (may take a few mins on CPU)"):
+            video_frames = pipe(prompt, num_frames=8, height=320, width=576).frames
 
             video_filename = tempfile.mktemp(".mp4")
             clips = [ImageClip(np.array(frame)).set_duration(0.3) for frame in video_frames]
-            final_clip = concatenate_videoclips(clips, method="compose")
-            final_clip.write_videofile(video_filename, fps=5)
-
-            st.video(video_filename)
-
-            os.remove(video_filename)
-    else:
-        st.warning("Enter a prompt to generate a video.")
+            final_clip = concatenate_videoclips(clips, method="compose")
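
Read as a single script, the post-commit flow looks roughly like the sketch below. Everything outside the two hunks above is an assumption, not part of this commit: the import block, the st.cache_resource decorator, and the write/display/cleanup steps after the concatenation line (the new hunk stops there), which are carried over from the previous revision for completeness.

# Minimal sketch of app.py after this commit -- not the committed file itself.
# Assumed: the import block, the @st.cache_resource decorator, and everything
# after the concatenate_videoclips(...) line (carried over from the old revision).
import os
import tempfile

import numpy as np
import streamlit as st
import torch
from diffusers import DiffusionPipeline
from moviepy.editor import ImageClip, concatenate_videoclips

st.title("🚀 Text-to-Video (Zeroscope)")


@st.cache_resource  # assumed: keep the pipeline loaded across Streamlit reruns
def load_model():
    pipe = DiffusionPipeline.from_pretrained(
        "cerspense/zeroscope_v2_576w",
        torch_dtype=torch.float32,  # full precision; half precision is poorly supported on CPU
    )
    pipe.to("cpu")  # plain CPU execution replaces the fp16 + enable_cpu_offload() path
    return pipe


pipe = load_model()

prompt = st.text_area("Enter prompt (short & descriptive):", max_chars=50)

if st.button("Generate Video"):
    if prompt:
        with st.spinner("Generating... (may take a few mins on CPU)"):
            # Fewer frames (8 instead of 10) keeps CPU generation time down.
            video_frames = pipe(prompt, num_frames=8, height=320, width=576).frames

            # Stitch the frames into a short clip with moviepy.
            video_filename = tempfile.mktemp(".mp4")
            clips = [ImageClip(np.array(frame)).set_duration(0.3) for frame in video_frames]
            final_clip = concatenate_videoclips(clips, method="compose")

            # Assumed continuation (the committed hunk ends at the line above):
            final_clip.write_videofile(video_filename, fps=5)
            st.video(video_filename)
            os.remove(video_filename)
    else:
        st.warning("Enter a prompt to generate a video.")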