# Install required libraries:
#   pip install -U openai-whisper gtts
# FFmpeg must also be installed and on the PATH: Whisper uses it to decode the
# video's audio, and Step 3 below calls it directly.
import whisper
from gtts import gTTS
import subprocess


# Step 1: Extract text from the video's audio track using Whisper
def extract_text_from_video(video_path):
    # Load the Whisper model ("base" trades accuracy for speed; larger models
    # such as "small" or "medium" transcribe more accurately but run slower)
    model = whisper.load_model("base")

    # Transcribe the video file's audio
    result = model.transcribe(video_path)

    # Extract the transcribed text
    text = result["text"]

    # Save the transcribed text to a file (optional)
    with open("video_text.txt", "w", encoding="utf-8") as f:
        f.write(text)

    return text


# Step 2: Generate a voice-over from the transcript using gTTS
def generate_voice_over(text, output_audio_path="voice_over.mp3"):
    # Generate audio with gTTS (requires an internet connection, since gTTS
    # calls Google's text-to-speech service)
    tts = gTTS(text=text, lang="en")
    tts.save(output_audio_path)
    print(f"Voice-over saved as {output_audio_path}")
    return output_audio_path


# Step 3: Combine the voice-over with the original video using FFmpeg
def add_voice_over_to_video(video_path, audio_path, output_video_path="output_video_with_voice.mp4"):
    # Build the FFmpeg command: keep the original video stream as-is and
    # replace its audio with the generated voice-over
    ffmpeg_command = [
        "ffmpeg",
        "-y",              # overwrite the output file if it already exists
        "-i", video_path,
        "-i", audio_path,
        "-c:v", "copy",    # copy the video stream without re-encoding
        "-map", "0:v:0",   # video from the first input (original video)
        "-map", "1:a:0",   # audio from the second input (voice-over)
        "-shortest",       # stop when the shorter of the two streams ends
        output_video_path,
    ]
    subprocess.run(ffmpeg_command, check=True)  # raise if FFmpeg fails
    print(f"Final video with voice-over saved as {output_video_path}")


# Run the complete process
def main(video_path):
    # Step 1: Extract text from the video
    text = extract_text_from_video(video_path)
    print("Extracted Text:", text)

    # Step 2: Generate a voice-over from the extracted text
    audio_path = generate_voice_over(text)

    # Step 3: Add the voice-over to the video
    add_voice_over_to_video(video_path, audio_path)


# Provide the path to your input video file
if __name__ == "__main__":
    main("input_video.mp4")