Spaces: Running on Zero
import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from subprocess import run
from faster_whisper import WhisperModel
import json
import tempfile
import os  # needed to delete the temporary files at the end

# Load the language mapping (display name -> FLORES-200 code)
with open('language_codes.json', 'r') as f:
    lang_codes = json.load(f)
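
# The JSON file is expected to map display names to FLORES-200 codes,
# e.g. (an illustrative sketch, not the actual file contents):
#   {"English": "eng_Latn", "Portuguese": "por_Latn", "French": "fra_Latn"}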

# Initialize the models
tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
whisper_model = WhisperModel("large-v2", device="cuda", compute_type="float16")
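# Note: only Whisper is placed on the GPU here; the NLLB model stays on the
# CPU unless it is moved explicitly (e.g. model.to("cuda")).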

def process_video(video, target_language):
    # 1. Extract the audio track (gr.Video hands the function the uploaded
    # file's path as a string in current Gradio releases). delete=False keeps
    # the temp file on disk, and -y lets ffmpeg overwrite it without prompting.
    audio_file = tempfile.NamedTemporaryFile(delete=False, suffix=".wav").name
    run(["ffmpeg", "-y", "-i", video, audio_file], check=True)

    # 2. Transcribe the audio
    segments, _ = whisper_model.transcribe(audio_file, beam_size=5)
    segments = list(segments)
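    # transcribe() returns a lazy generator plus a TranscriptionInfo object
    # (the detected source language is available as info.language if needed);
    # materializing the list above runs the whole transcription up front.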

    # Write the transcript to an .srt file with timestamps
    temp_transcript_file = tempfile.NamedTemporaryFile(delete=False, suffix=".srt")
    with open(temp_transcript_file.name, "w", encoding="utf-8") as f:
        counter = 1
        for segment in segments:
            # SRT timestamps use the form HH:MM:SS,mmm; the hour field is
            # required, and omitting it breaks videos longer than an hour
            start_hours = int(segment.start // 3600)
            start_minutes = int((segment.start % 3600) // 60)
            start_seconds = int(segment.start % 60)
            start_milliseconds = int((segment.start - int(segment.start)) * 1000)
            end_hours = int(segment.end // 3600)
            end_minutes = int((segment.end % 3600) // 60)
            end_seconds = int(segment.end % 60)
            end_milliseconds = int((segment.end - int(segment.end)) * 1000)
            formatted_start = f"{start_hours:02d}:{start_minutes:02d}:{start_seconds:02d},{start_milliseconds:03d}"
            formatted_end = f"{end_hours:02d}:{end_minutes:02d}:{end_seconds:02d},{end_milliseconds:03d}"
            f.write(f"{counter}\n")
            f.write(f"{formatted_start} --> {formatted_end}\n")
            f.write(f"{segment.text.strip()}\n\n")
            counter += 1
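    # Each entry in the resulting file looks like (illustrative values):
    #   1
    #   00:00:01,000 --> 00:00:03,500
    #   Hello, world.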

    # 3. Translate the subtitle text line by line
    flores_code = lang_codes.get(target_language, "eng_Latn")  # fall back to English
    temp_translated_file = tempfile.NamedTemporaryFile(delete=False, suffix=".srt")
    with open(temp_transcript_file.name, "r", encoding="utf-8") as infile, \
         open(temp_translated_file.name, "w", encoding="utf-8") as outfile:
        for line in infile:
            if line.strip().isnumeric() or "-->" in line:
                # Copy counters and timestamp lines through unchanged
                outfile.write(line)
            elif line.strip() != "":
                inputs = tokenizer(line.strip(), return_tensors="pt")
                # forced_bos_token_id steers generation into the target language;
                # convert_tokens_to_ids works where the old lang_code_to_id
                # attribute has been removed from recent transformers releases
                translated_tokens = model.generate(
                    **inputs,
                    forced_bos_token_id=tokenizer.convert_tokens_to_ids(flores_code),
                    max_length=100,
                )
                translated_text = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]
                outfile.write(translated_text + "\n")
            else:
                outfile.write("\n")
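    # Note: the NLLB tokenizer defaults to an English source (src_lang="eng_Latn").
    # To translate from the language Whisper detected instead, the tokenizer would
    # need to be recreated with that code, e.g. (sketch, assuming a valid code):
    #   tokenizer = AutoTokenizer.from_pretrained(
    #       "facebook/nllb-200-distilled-600M", src_lang="por_Latn")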

    # 4. Burn the translated subtitles into the video
    output_video = "output_video.mp4"
    run(["ffmpeg", "-y", "-i", video, "-vf", f"subtitles={temp_translated_file.name}", output_video], check=True)

    # Remove the temporary files and hand the dubbed video back to Gradio
    os.unlink(audio_file)
    os.unlink(temp_transcript_file.name)
    os.unlink(temp_translated_file.name)
    return output_video
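
# The pipeline can also be exercised without the UI (hypothetical paths):
#   dubbed = process_video("input.mp4", "Portuguese")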

# Gradio interface
iface = gr.Interface(
    fn=process_video,
    inputs=[
        gr.Video(),
        gr.Dropdown(choices=list(lang_codes.keys()), label="Target Language for Dubbing", value="English"),
    ],
    outputs=gr.Video(),
    live=False,
    title="AI Video Dubbing",
)
iface.launch()
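
# launch() starts the web server; on Hugging Face Spaces this runs automatically
# when app.py is executed, and locally the app is served at
# http://127.0.0.1:7860 by default.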