# DAI_Project / app.py
# (Hugging Face Space raw-view header, kept as a comment so the module parses:
#  author younes21000, commit be55105 verified, file size 6.53 kB)
import gradio as gr
import moviepy.editor as mp
import librosa
from transformers import pipeline
from concurrent.futures import ThreadPoolExecutor
import tempfile
import docx # To create Word documents
from moviepy.video.tools.subtitles import SubtitlesClip
from moviepy.editor import TextClip
import os
# Load Whisper model for speech-to-text. The 'tiny' checkpoint trades accuracy
# for speed so transcription stays responsive on CPU-only Spaces hardware.
asr = pipeline("automatic-speech-recognition", model="openai/whisper-tiny") # Use 'whisper-tiny' for faster transcription
# M2M100 multilingual translation model (418M-parameter variant).
translator = pipeline("translation", model="facebook/m2m100_418M")
# Module-level scratch store shared by all handlers: holds "original"
# (transcription), "translated" (translated text) and "video_path".
# NOTE(review): global mutable state — not safe for concurrent users; confirm
# single-user usage or move to gr.State.
subtitle_storage = {}
# Supported target languages mapped to their M2M100 language codes.
languages = {
    "Persian (fa)": "fa",
    "French (fr)": "fr",
    "Spanish (es)": "es",
    "German (de)": "de",
    "Chinese (zh)": "zh",
    "Arabic (ar)": "ar",
    "Hindi (hi)": "hi",
    "Russian (ru)": "ru"
}
def transcribe_audio(chunk):
    """Run the Whisper ASR pipeline on one audio chunk and return its text."""
    result = asr(chunk)
    return result["text"]
def add_subtitle(video):
    """Transcribe the uploaded video's audio track and store the text.

    Extracts the audio to a temporary WAV file, resamples it to 16 kHz
    (what Whisper expects), transcribes 15-second chunks in parallel, and
    saves the joined transcription in ``subtitle_storage["original"]``.
    Also records the video path in ``subtitle_storage["video_path"]`` so
    ``download_video`` can re-open the clip later.

    Args:
        video: Gradio upload object exposing a ``.name`` file path, or None.

    Returns:
        Status string: a preview of the transcription, or an error message.
    """
    try:
        video_path = video.name if video else None
        if not video_path:
            return "No video provided!"
        # Fix: remember the source path — download_video() reads this key,
        # but the original code never stored it.
        subtitle_storage["video_path"] = video_path
        clip = mp.VideoFileClip(video_path)
        try:
            # delete=False and an explicit close so moviepy can re-open the
            # file by name (an open NamedTemporaryFile cannot be reopened on
            # Windows); we remove it ourselves afterwards.
            tmp_audio = tempfile.NamedTemporaryFile(delete=False, suffix='.wav')
            tmp_audio.close()
            try:
                clip.audio.write_audiofile(tmp_audio.name, codec='pcm_s16le')
                waveform, sr = librosa.load(tmp_audio.name, sr=16000)
            finally:
                os.unlink(tmp_audio.name)
        finally:
            clip.close()  # release the video file handle
        # Split into 15-second chunks and transcribe them concurrently.
        chunk_duration = 15  # seconds
        chunk_size = sr * chunk_duration
        chunks = [waveform[i:i + chunk_size]
                  for i in range(0, len(waveform), chunk_size)
                  if len(waveform[i:i + chunk_size]) > 0]
        with ThreadPoolExecutor() as executor:
            transcriptions = list(executor.map(transcribe_audio, chunks))
        full_transcription = " ".join(transcriptions)
        subtitle_storage["original"] = full_transcription
        return f"Subtitle added: {full_transcription[:100]}..."  # preview only
    except Exception as e:
        return f"Error in adding subtitle: {e}"
def translate_subtitle(video, target_language="Persian (fa)"):
    """Translate the stored transcription and cache the result.

    Generalized: the target language was hard-coded to Persian even though a
    full ``languages`` table exists; it is now a keyword parameter whose
    default preserves the original behavior.

    Args:
        video: Unused; present because the Gradio click handler passes the
            video component as input.
        target_language: A key of the module-level ``languages`` dict.

    Returns:
        Status string describing success or the failure reason.
    """
    try:
        original_subtitle = subtitle_storage.get("original")
        if not original_subtitle:
            return "No subtitle to translate!"
        tgt_code = languages.get(target_language)
        if tgt_code is None:
            return f"Unsupported target language: {target_language}"
        translated_subtitle = translator(
            original_subtitle,
            src_lang="en",  # transcription is assumed to be English
            tgt_lang=tgt_code,
        )[0]["translation_text"]
        subtitle_storage["translated"] = translated_subtitle
        return "Subtitle translated successfully!"
    except Exception as e:
        return f"Error in translating subtitle: {e}"
def download_word():
    """Save the cached translated subtitle text into a Word (.docx) document.

    Returns:
        Status string naming the saved file, or an error message when there
        is no translation cached or the save fails.
    """
    try:
        text = subtitle_storage.get("translated")
        if not text:
            return "No translated subtitle to save!"
        document = docx.Document()
        document.add_heading('Translated Subtitles', 0)
        document.add_paragraph(text)
        output_path = "translated_subtitles.docx"
        document.save(output_path)
        return f"Translated subtitles saved as Word document: {output_path}"
    except Exception as e:
        return f"Error in saving subtitles as Word: {e}"
def download_video():
    """Burn the translated subtitles onto the uploaded video and save it.

    Shows the translation in 50-character slices, each displayed for 5
    seconds in sequence at the bottom-center of the frame.

    Returns:
        Status string naming the output file, or an error message.
    """
    try:
        original_subtitle = subtitle_storage.get("original")
        translated_subtitle = subtitle_storage.get("translated")
        if not original_subtitle or not translated_subtitle:
            return "No subtitles to overlay on video!"
        video_path = subtitle_storage.get("video_path")
        # Fix: guard the missing-path case explicitly instead of letting
        # VideoFileClip(None) raise a cryptic exception.
        if not video_path or not os.path.exists(video_path):
            return "Original video not found! Upload and transcribe it first."
        video = mp.VideoFileClip(video_path)
        try:
            generator = lambda txt: TextClip(txt, font='Arial', fontsize=24, color='white')
            # Fix two bugs in the subtitle schedule:
            # 1. timing used i*5 while i stepped by 50, so the 2nd cue
            #    started at 250 s — use the chunk index instead;
            # 2. SubtitlesClip expects ((start, end), text) pairs, not
            #    (start, end, text) 3-tuples.
            subs = [((idx * 5, idx * 5 + 5), translated_subtitle[i:i + 50])
                    for idx, i in enumerate(range(0, len(translated_subtitle), 50))]
            subtitles = SubtitlesClip(subs, generator)
            subtitled_video = mp.CompositeVideoClip(
                [video, subtitles.set_position(('center', 'bottom'))])
            output_video_path = "subtitled_video.mp4"
            subtitled_video.write_videofile(output_video_path)
            subtitled_video.close()
        finally:
            video.close()  # release the source file handle
        return f"Subtitled video is ready for download: {output_video_path}"
    except Exception as e:
        return f"Error in generating subtitled video: {e}"
# Gradio UI: one Blocks layout wiring each button to its handler. Each
# handler returns a status string displayed in the adjacent Textbox.
with gr.Blocks() as demo:
    # Page title
    gr.Markdown("<h1 style='text-align: center;'>Video Subtitle Translator</h1>")
    # Video upload row.
    # NOTE(review): the "Upload Video" button runs the full transcription
    # (add_subtitle) — identical to the "Add Subtitle" button below, so the
    # work is done twice if both are clicked; confirm this duplication is
    # intentional.
    with gr.Row():
        video_input = gr.Video(label="Upload Video")
        upload_button = gr.Button("Upload Video")
        upload_status = gr.Textbox(label="Upload Status")
        upload_button.click(add_subtitle, inputs=video_input, outputs=upload_status)
    # Transcribe the uploaded video into subtitle_storage["original"].
    with gr.Row():
        add_subtitle_button = gr.Button("Add Subtitle")
        subtitle_status = gr.Textbox(label="Subtitle Status")
        add_subtitle_button.click(add_subtitle, inputs=video_input, outputs=subtitle_status)
    # Translate the stored transcription (default target: Persian).
    with gr.Row():
        translate_button = gr.Button("Translate Subtitle")
        translate_status = gr.Textbox(label="Translation Status")
        translate_button.click(translate_subtitle, inputs=video_input, outputs=translate_status)
    # Export the translation as a Word document.
    with gr.Row():
        download_button = gr.Button("Download as Word")
        download_status = gr.Textbox(label="Download Status")
        download_button.click(download_word, inputs=None, outputs=download_status)
    # Render the translation onto the video and export it.
    with gr.Row():
        download_video_button = gr.Button("Download Subtitled Video")
        download_video_status = gr.Textbox(label="Download Video Status")
        download_video_button.click(download_video, inputs=None, outputs=download_video_status)
# Launch the Gradio app
demo.launch()