Spaces:
Runtime error
Runtime error
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import pandas as pd
|
3 |
+
from faster_whisper import WhisperModel
|
4 |
+
import logging
|
5 |
+
import os
|
6 |
+
import pysrt
|
7 |
+
from transformers import MarianMTModel, MarianTokenizer
|
8 |
+
import ffmpeg
|
9 |
+
|
10 |
+
# --- Initial configuration and data loading ---
# The ISO-639 code table is published as a Markdown table; parsing it with
# "|" as the delimiter leaves empty edge columns (dropped below) and the
# first two rows are the Markdown header and separator (skipped).
url = "https://huggingface.co/Lenylvt/LanguageISO/resolve/main/iso.md"
_raw_table = pd.read_csv(url, delimiter="|", skiprows=2, header=None)
df = _raw_table.dropna(axis=1, how='all')
df.columns = ['ISO 639-1', 'ISO 639-2', 'Language Name', 'Native Name']
# Codes carry padding spaces from the Markdown cells; strip so lookups match.
df['ISO 639-1'] = df['ISO 639-1'].str.strip()

# Choices offered in the UI selectboxes below.
language_options = df['ISO 639-1'].tolist()
model_size_options = ["tiny", "base", "small", "medium", "large", "large-v2", "large-v3"]

logging.basicConfig(level=logging.DEBUG)
|
20 |
+
|
21 |
+
def text_to_srt(text, output_path='/tmp/output.srt'):
    """Convert bracketed-timestamp transcription lines into an SRT file.

    Each input line must look like
    ``[HH:MM:SS.mmm -> HH:MM:SS.mmm] caption text`` (the format produced by
    ``transcribe``).  Blank or malformed lines are skipped.

    Args:
        text: Transcription with one timestamped segment per line.
        output_path: Where the generated ``.srt`` file is written
            (defaults to the original hard-coded ``/tmp/output.srt``).

    Returns:
        The path of the written SRT file.
    """
    entries = []
    # SRT requires strictly sequential cue numbers, so keep a separate
    # counter instead of the raw line index (skipped lines must not
    # create gaps in the numbering).
    cue_number = 0
    for line in text.split('\n'):
        if not line.strip():
            continue
        try:
            times, content = line.split(']', 1)
            start, end = times[1:].split(' -> ')
        except ValueError:
            # Line lacks a "[start -> end]" prefix; ignore it.
            continue
        # Pad "MM:SS" timestamps to the "HH:MM:SS" form SRT requires.
        if start.count(":") == 1:
            start = "00:" + start
        if end.count(":") == 1:
            end = "00:" + end
        cue_number += 1
        # SRT uses a comma as the decimal separator in timestamps.
        entries.append(
            f"{cue_number}\n{start.replace('.', ',')} --> {end.replace('.', ',')}\n{content.strip()}\n\n"
        )
    with open(output_path, 'w', encoding='utf-8') as file:
        file.write("".join(entries))
    return output_path
|
41 |
+
|
42 |
+
def format_timestamp(seconds):
    """Render a duration in seconds as an ``HH:MM:SS.mmm`` timestamp string."""
    total_minutes, secs = divmod(seconds, 60)
    hrs, mins = divmod(int(total_minutes), 60)
    # Seconds keep millisecond precision, zero-padded to width 6 ("SS.mmm").
    return f"{hrs:02d}:{mins:02d}:{secs:06.3f}"
|
47 |
+
|
48 |
+
def translate_text(text, source_language_code, target_language_code):
    """Translate *text* with a Helsinki-NLP MarianMT model.

    Returns the translated string on success; otherwise returns a
    human-readable error message (same-language pairs are rejected, and
    model-loading failures are reported rather than raised).
    """
    # Guard first: there is no opus-mt model for identical language pairs.
    if source_language_code == target_language_code:
        return "Translation between the same languages is not supported."
    model_name = f"Helsinki-NLP/opus-mt-{source_language_code}-{target_language_code}"
    try:
        tokenizer = MarianTokenizer.from_pretrained(model_name)
        translator = MarianMTModel.from_pretrained(model_name)
    except Exception as load_error:
        return f"Failed to load model for {source_language_code} to {target_language_code}: {str(load_error)}"
    # Truncate at 512 tokens to stay within the model's input limit.
    encoded = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=512)
    output_ids = translator.generate(**encoded)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
|
60 |
+
|
61 |
+
def transcribe(audio_file_path, model_size="base", device="cpu", compute_type="int8"):
    """Transcribe a media file with faster-whisper.

    Args:
        audio_file_path: Path to the audio/video file to transcribe.
        model_size: Whisper model size (one of ``model_size_options``).
        device: Inference device; defaults to "cpu" (previously
            hard-coded — now a parameter so GPU callers can opt in).
        compute_type: Model precision/quantization; "int8" keeps CPU
            memory use low.

    Returns:
        One "[start -> end] text" line per recognized segment, joined
        with newlines (the format ``text_to_srt`` expects).
    """
    model = WhisperModel(model_size, device=device, compute_type=compute_type)
    segments, _ = model.transcribe(audio_file_path)
    transcription_with_timestamps = [
        f"[{format_timestamp(segment.start)} -> {format_timestamp(segment.end)}] {segment.text}"
        for segment in segments
    ]
    return "\n".join(transcription_with_timestamps)
|
71 |
+
|
72 |
+
def add_subtitle_to_video(input_video, subtitle_file, subtitle_language, soft_subtitle=False):
    """Add a subtitle file to a video with ffmpeg.

    Args:
        input_video: Path to the source video.
        subtitle_file: Path to the ``.srt`` file to add.
        subtitle_language: Language tag recorded on the subtitle stream
            (soft subtitles only; previously this parameter was ignored).
        soft_subtitle: If True, mux the subtitles as a selectable
            mov_text stream without re-encoding; if False, burn them
            into the video frames.

    Returns:
        Path of the subtitled output video under ``/tmp``.
    """
    video_input_stream = ffmpeg.input(input_video)
    subtitle_input_stream = ffmpeg.input(subtitle_file)
    input_video_name = os.path.splitext(os.path.basename(input_video))[0]
    output_video = f"/tmp/{input_video_name}_subtitled.mp4"
    if soft_subtitle:
        # Stream-copy audio/video and embed the subtitles as mov_text,
        # tagging the first subtitle stream with its language so players
        # can label the track.
        stream = ffmpeg.output(
            video_input_stream,
            subtitle_input_stream,
            output_video,
            **{"c": "copy", "c:s": "mov_text",
               "metadata:s:s:0": f"language={subtitle_language}"},
        )
    else:
        # Hard subtitles: re-encode, burning the text into the frames.
        stream = ffmpeg.output(video_input_stream, output_video, vf=f"subtitles={subtitle_file}")
    ffmpeg.run(stream, overwrite_output=True)
    return output_video
|
83 |
+
|
84 |
+
st.title("Video Subtitling and Translation")

uploaded_file = st.file_uploader("Upload Video", type=["mp4", "avi", "mov"])
action = st.radio("Select Action", ["Transcribe and Add Subtitles", "Transcribe, Translate and Add Subtitles"])
source_language = st.selectbox("Source Language", options=language_options, index=language_options.index('en'))
target_language = st.selectbox("Target Language", options=language_options, index=language_options.index('fr'))
model_size = st.selectbox("Model Size", options=model_size_options)

if st.button("Process Video"):
    if uploaded_file is not None:
        with st.spinner('Processing...'):
            # Persist the upload to disk so ffmpeg/whisper can read it.
            audio_file_path = f"/tmp/{uploaded_file.name}"
            with open(audio_file_path, "wb") as f:
                f.write(uploaded_file.getvalue())
            transcription = transcribe(audio_file_path, model_size)
            srt_path = text_to_srt(transcription)
            if action == "Transcribe and Add Subtitles":
                output_video_path = add_subtitle_to_video(audio_file_path, srt_path, subtitle_language="eng", soft_subtitle=False)
            else:  # Transcribe, Translate and Add Subtitles
                # BUGFIX: the previous code passed the SRT *file path*
                # string to translate_text and used the returned text as
                # a file path. Instead, load the SRT and translate each
                # cue's text so numbering and timestamps stay intact.
                subs = pysrt.open(srt_path, encoding='utf-8')
                for sub in subs:
                    sub.text = translate_text(sub.text, source_language, target_language)
                translated_srt_path = '/tmp/output_translated.srt'
                subs.save(translated_srt_path, encoding='utf-8')
                output_video_path = add_subtitle_to_video(audio_file_path, translated_srt_path, target_language, soft_subtitle=False)
            st.video(output_video_path)
            st.success("Processing Completed")
    else:
        st.error("Please upload a video file first.")
|