artificialguybr committed on
Commit 73fd4c0
1 Parent(s): 88a4625

Update app.py

Files changed (1)
  1. app.py +83 -39
app.py CHANGED
@@ -1,41 +1,85 @@
 
 
  import os
- import wget
- import zipfile
-
-
- # Clone necessary repositories
- os.system("git clone https://github.com/vinthony/video-retalking.git")
- os.system("git clone https://github.com/davisking/dlib.git")
- os.system("git clone https://github.com/openai/whisper.git")
-
- # Install dlib
- os.system("cd dlib && python setup.py install")
-
- # Create checkpoints directory in video-retalking
- os.makedirs("./video-retalking/checkpoints", exist_ok=True)
-
- # Download model checkpoints and other files
- model_urls = [
-     "https://github.com/vinthony/video-retalking/releases/download/v0.0.1/30_net_gen.pth",
-     "https://github.com/vinthony/video-retalking/releases/download/v0.0.1/BFM.zip",
-     "https://github.com/vinthony/video-retalking/releases/download/v0.0.1/DNet.pt",
-     "https://github.com/vinthony/video-retalking/releases/download/v0.0.1/ENet.pth",
-     "https://github.com/vinthony/video-retalking/releases/download/v0.0.1/expression.mat",
-     "https://github.com/vinthony/video-retalking/releases/download/v0.0.1/face3d_pretrain_epoch_20.pth",
-     "https://github.com/vinthony/video-retalking/releases/download/v0.0.1/GFPGANv1.3.pth",
-     "https://github.com/vinthony/video-retalking/releases/download/v0.0.1/GPEN-BFR-512.pth",
-     "https://github.com/vinthony/video-retalking/releases/download/v0.0.1/LNet.pth",
-     "https://github.com/vinthony/video-retalking/releases/download/v0.0.1/ParseNet-latest.pth",
-     "https://github.com/vinthony/video-retalking/releases/download/v0.0.1/shape_predictor_68_face_landmarks.dat"
- ]
-
- for url in model_urls:
-     wget.download(url, out="./video-retalking/checkpoints")
-
- # Unzip files
- with zipfile.ZipFile("./video-retalking/checkpoints/BFM.zip", 'r') as zip_ref:
-     zip_ref.extractall("./video-retalking/checkpoints")
-
- # Install Python packages
- #os.system("pip install basicsr==1.4.2 face-alignment==1.3.4 kornia==0.5.1 ninja==1.10.2.3 einops==0.4.1 facexlib==0.2.5 librosa==0.9.2 build")
+ import gradio as gr
+ import subprocess
  import os
+ from googletrans import Translator
+ from TTS.api import TTS
+ import ffmpeg
+ import whisper
+ from scipy.signal import wiener
+ import soundfile as sf
+ from pydub import AudioSegment
+ import numpy as np
+ import shlex
+ import librosa
+
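+ # Pipeline: optional resize -> audio extraction and clean-up -> Whisper transcription -> translation -> XTTS voice cloning -> Wav2Lip lip-sync.
+ # Agree to the Coqui terms of service up front so the XTTS model download does not stop to ask for confirmation.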
+ os.environ["COQUI_TOS_AGREED"] = "1"
+ def process_video(video, high_quality, target_language):
+     output_filename = "resized_video.mp4"
+     if high_quality:
+         ffmpeg.input(video).output(output_filename, vf='scale=-1:720').run()
+         video_path = output_filename
+     else:
+         video_path = video
+
+     # Debugging Step 1: Check if video_path exists
+     if not os.path.exists(video_path):
+         return f"Error: {video_path} does not exist."
+
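+     # Extract the audio track as 24-bit PCM at 48 kHz for downstream processing.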
+     ffmpeg.input(video_path).output('output_audio.wav', acodec='pcm_s24le', ar=48000, map='a').run()
+
+     y, sr = sf.read("output_audio.wav")
+     y = y.astype(np.float32)
+     y_denoised = wiener(y)
+     sf.write("output_audio_denoised.wav", y_denoised, sr)
+
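+     # Band-limit the denoised audio with pydub before it is reused as the XTTS voice reference.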
+     sound = AudioSegment.from_file("output_audio_denoised.wav", format="wav")
+     sound = sound.apply_gain(0)  # No gain change applied; adjust here if the source level needs correction
+     sound = sound.low_pass_filter(3000).high_pass_filter(100)
+     sound.export("output_audio_processed.wav", format="wav")
+
+     shell_command = "ffmpeg -y -i output_audio_processed.wav -af lowpass=3000,highpass=100 output_audio_final.wav".split(" ")
+     subprocess.run(shell_command, capture_output=False, text=True, check=True)
+
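+     # Transcribe the cleaned audio with Whisper; the result also reports the detected source language.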
43
+ model = whisper.load_model("base")
44
+ result = model.transcribe("output_audio_final.wav")
45
+ whisper_text = result["text"]
46
+ whisper_language = result['language']
47
+
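+     # Map the UI language name to a language code and translate the transcript with googletrans.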
48
+ language_mapping = {'English': 'en', 'Spanish': 'es', 'French': 'fr', 'German': 'de', 'Italian': 'it', 'Portuguese': 'pt', 'Polish': 'pl', 'Turkish': 'tr', 'Russian': 'ru', 'Dutch': 'nl', 'Czech': 'cs', 'Arabic': 'ar', 'Chinese (Simplified)': 'zh-cn'}
49
+ target_language_code = language_mapping[target_language]
50
+ translator = Translator()
51
+ translated_text = translator.translate(whisper_text, src=whisper_language, dest=target_language_code).text
52
+
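+     # Synthesize the translated text with XTTS, cloning the original speaker's voice from the extracted audio.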
53
+ tts = TTS("tts_models/multilingual/multi-dataset/xtts_v1")
54
+ tts.to('cuda') # Replacing deprecated gpu=True
55
+ tts.tts_to_file(translated_text, speaker_wav='output_audio_final.wav', file_path="output_synth.wav", language=target_language_code)
56
+
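+     # Wav2Lip parameters: pads are top, bottom, left, right; a resize factor of 1 keeps the original resolution.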
+     pad_top = 0
+     pad_bottom = 15
+     pad_left = 0
+     pad_right = 0
+     rescaleFactor = 1
+
+     # Debugging Step 2: Remove quotes around the video path
+     video_path_fix = video_path
+
+     cmd = f"python Wav2Lip/inference.py --checkpoint_path '/Wav2Lip/checkpoints/wav2lip_gan.pth' --face {shlex.quote(video_path_fix)} --audio 'output_synth.wav' --pads {pad_top} {pad_bottom} {pad_left} {pad_right} --resize_factor {rescaleFactor} --nosmooth --outfile 'output_video.mp4'"
+     subprocess.run(cmd, shell=True)
+     # Debugging Step 3: Check if output video exists
+     if not os.path.exists("output_video.mp4"):
+         return "Error: output_video.mp4 was not generated."
+
+     return "output_video.mp4"
+
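+ # Gradio UI: video upload, a high-quality toggle, and a target-language dropdown; the output is the dubbed video file.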
+ iface = gr.Interface(
+     fn=process_video,
+     inputs=[
+         gr.Video(),
+         gr.Checkbox(label="High Quality"),
+         gr.Dropdown(choices=["English", "Spanish", "French", "German", "Italian", "Portuguese", "Polish", "Turkish", "Russian", "Dutch", "Czech", "Arabic", "Chinese (Simplified)"], label="Target Language for Dubbing")
+     ],
+     outputs=gr.File(),
+     live=False
+ )
+
+ iface.launch(share=True)