ZeeAI1 committed
Commit f81fa21 · verified · 1 Parent(s): 1c75460

Update app.py

Files changed (1)
  1. app.py +26 -47
app.py CHANGED
@@ -1,54 +1,33 @@
 import streamlit as st
 from moviepy.editor import *
-from transformers import pipeline
 from TTS.api import TTS
 import tempfile, os

-st.title("📝 Text-to-Video App with Voice Clone")
+st.title("Simple Text-to-Video App")

-# Caching for faster reloads
 @st.cache_resource()
-def load_models():
-    video_gen = pipeline('text-to-video-generation', model='cerspense/zeroscope_v2_XS')  # extra small version
-    tts_model = TTS("tts_models/en/ljspeech/tacotron2-DDC", progress_bar=False)  # lighter TTS model
-    return video_gen, tts_model
-
-video_gen, tts_model = load_models()
-
-# Input
-input_text = st.text_area("Enter short text (max 100 chars):", max_chars=100)
-voice_file = st.file_uploader("Upload your voice sample (short WAV):", type=["wav"])
-
-if st.button("Generate"):
-    if input_text and voice_file:
-        with st.spinner("Creating video (may take a minute)..."):
-            # Short video (15 frames only)
-            video_output = video_gen(input_text, num_frames=15)
-            video_tensor = video_output["video"]
-            video_np = (video_tensor * 255).astype('uint8')
-
-            video_filename = tempfile.mktemp(suffix=".mp4")
-            clips = [ImageClip(frame).set_duration(0.2) for frame in video_np]
-            video_clip = concatenate_videoclips(clips)
-            video_clip.write_videofile(video_filename, fps=5)
-
-            # Short audio clip
-            voice_path = tempfile.mktemp(suffix=".wav")
-            audio_filename = tempfile.mktemp(suffix=".wav")
-            with open(voice_path, "wb") as f:
-                f.write(voice_file.read())
-
-            tts_model.tts_to_file(text=input_text, speaker_wav=voice_path, file_path=audio_filename)
-
-            # Combine video and audio
-            final_clip = VideoFileClip(video_filename).set_audio(AudioFileClip(audio_filename))
-            final_video_path = tempfile.mktemp(suffix=".mp4")
-            final_clip.write_videofile(final_video_path, fps=5)
-
-            st.video(final_video_path)
-
-            # Cleanup
-            for f in [video_filename, audio_filename, voice_path, final_video_path]:
-                os.remove(f)
-    else:
-        st.warning("Provide both text and a voice sample.")
+def load_tts():
+    return TTS("tts_models/en/ljspeech/tacotron2-DDC", progress_bar=False)
+
+tts_model = load_tts()
+
+input_text = st.text_area("Enter very short text (max 100 chars):", max_chars=100)
+
+if st.button("Generate Simple Video") and input_text:
+    with st.spinner("Generating..."):
+        # Audio Only (no cloning, fast)
+        audio_filename = tempfile.mktemp(".wav")
+        tts_model.tts_to_file(text=input_text, file_path=audio_filename)
+
+        # Single static image as video (for demonstration)
+        img_clip = ColorClip(size=(320, 240), color=(0, 0, 255)).set_duration(5)
+        audio_clip = AudioFileClip(audio_filename)
+        final_clip = img_clip.set_audio(audio_clip)
+
+        final_video_path = tempfile.mktemp(".mp4")
+        final_clip.write_videofile(final_video_path, fps=5)
+
+        st.video(final_video_path)
+
+        os.remove(audio_filename)
+        os.remove(final_video_path)
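
The updated app lays a Coqui TTS narration over a static MoviePy ColorClip. Below is a minimal standalone sketch of that same combination outside Streamlit; the sample text, the "demo.mp4" output name, and the use of NamedTemporaryFile instead of the deprecated tempfile.mktemp are illustrative assumptions, not part of this commit.

# Standalone sketch (assumed names): speech over a solid-color clip, no Streamlit.
import tempfile

from moviepy.editor import AudioFileClip, ColorClip
from TTS.api import TTS

text = "Hello from a minimal text-to-video test."  # illustrative sample text

tts = TTS("tts_models/en/ljspeech/tacotron2-DDC", progress_bar=False)

# NamedTemporaryFile avoids the race condition of tempfile.mktemp
with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as wav:
    audio_path = wav.name
tts.tts_to_file(text=text, file_path=audio_path)

audio = AudioFileClip(audio_path)
# Hold one blue 320x240 frame for the full length of the narration
video = ColorClip(size=(320, 240), color=(0, 0, 255)).set_duration(audio.duration)
video = video.set_audio(audio)
video.write_videofile("demo.mp4", fps=5)  # assumed output path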