ZeeAI1 committed (verified)
Commit 9e806af · 1 Parent(s): 14bf962

Update app.py

Files changed (1)
  app.py +18 -13
app.py CHANGED
@@ -1,27 +1,32 @@
 import streamlit as st
+from diffusers import DiffusionPipeline
+import torch
 from moviepy.editor import *
+import numpy as np
 import tempfile, os
-from transformers import pipeline

-st.title("Simplified Text-to-Video (No TTS temporarily)")
+st.title("🚀 Text-to-Video (Zeroscope)")

-@st.cache_resource()
+@st.cache_resource
 def load_model():
-    return pipeline('text-to-video-generation', model='cerspense/zeroscope_v2_576w')
+    pipe = DiffusionPipeline.from_pretrained(
+        "cerspense/zeroscope_v2_576w",
+        torch_dtype=torch.float16
+    )
+    pipe.enable_cpu_offload()
+    return pipe

-video_gen = load_model()
+pipe = load_model()

-input_text = st.text_area("Enter short text (max 50 chars):", max_chars=50)
+prompt = st.text_area("Enter prompt (short & descriptive):", max_chars=50)

 if st.button("Generate Video"):
-    if input_text:
-        with st.spinner("Generating video..."):
-            video_output = video_gen(input_text, num_frames=10)
-            video_tensor = video_output["video"]
-            video_np = (video_tensor * 255).astype('uint8')
+    if prompt:
+        with st.spinner("Generating... (this may take ~2-3 mins)"):
+            video_frames = pipe(prompt, num_frames=10, height=320, width=576).frames

             video_filename = tempfile.mktemp(".mp4")
-            clips = [ImageClip(frame).set_duration(0.3) for frame in video_np]
+            clips = [ImageClip(np.array(frame)).set_duration(0.3) for frame in video_frames]
             final_clip = concatenate_videoclips(clips, method="compose")
             final_clip.write_videofile(video_filename, fps=5)

@@ -29,4 +34,4 @@ if st.button("Generate Video"):

             os.remove(video_filename)
     else:
-        st.warning("Please enter text.")
+        st.warning("Enter a prompt to generate a video.")
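
Note: below is a minimal sketch of the same generation path using only documented diffusers/Streamlit APIs, not part of the commit. It assumes a recent diffusers release with accelerate installed: `enable_cpu_offload()` is not a pipeline method (the documented calls are `enable_model_cpu_offload()` or `enable_sequential_cpu_offload()`), `.frames` is batched per prompt on recent releases, and `diffusers.utils.export_to_video` can replace the per-frame moviepy step. Variable names are illustrative; the model ID comes from the commit.

import os
import tempfile

import streamlit as st
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video

@st.cache_resource
def load_model():
    pipe = DiffusionPipeline.from_pretrained(
        "cerspense/zeroscope_v2_576w",
        torch_dtype=torch.float16,  # fp16 assumes a CUDA device; drop this kwarg on CPU-only hardware
    )
    pipe.enable_model_cpu_offload()  # documented offload hook (requires accelerate)
    return pipe

pipe = load_model()
prompt = st.text_area("Enter prompt (short & descriptive):", max_chars=50)

if st.button("Generate Video"):
    if prompt:
        with st.spinner("Generating..."):
            # .frames is batched (one frame list per prompt) on recent diffusers
            # releases; on older releases drop the [0] and use .frames directly.
            frames = pipe(prompt, num_frames=10, height=320, width=576).frames[0]

            # mkstemp avoids the race condition of the deprecated tempfile.mktemp
            fd, video_path = tempfile.mkstemp(suffix=".mp4")
            os.close(fd)
            export_to_video(frames, video_path)  # writes the mp4 without moviepy

            with open(video_path, "rb") as f:
                st.video(f.read())  # read the bytes first so the temp file can be deleted
            os.remove(video_path)
    else:
        st.warning("Enter a prompt to generate a video.")

Writing the frames straight to an mp4 with export_to_video sidesteps building per-frame ImageClips, and reading the file back as bytes before os.remove keeps st.video from pointing at a deleted path.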