EC2 Default User
committed on
Commit
•
03cce66
1
Parent(s):
4aee049
Add application file
Browse files- .ipynb_checkpoints/app-checkpoint.py +20 -0
- app.py +20 -0
- packages.txt +1 -0
- requirements.txt +7 -0
- wav2lip.pth +3 -0
.ipynb_checkpoints/app-checkpoint.py
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
import sys
import shlex  # used to safely quote user-supplied file paths in shell commands
import gradio as gr

# Fetch the Wav2Lip repository and the S3FD face-detection weights at startup,
# then flatten the repo into the working directory so inference.py is local.
os.system('git clone https://github.com/Rudrabha/Wav2Lip.git')
os.system('curl -o ./Wav2Lip/face_detection/detection/sfd/s3fd.pth https://www.adrianbulat.com/downloads/python-fan/s3fd-619a316812.pth')
os.system('mv ./Wav2Lip/* .')

title = "Text2Lip"
description = "Wav2Lip With Text"
# BUG FIX: `article` was passed to gr.Interface() below but never defined,
# which raised NameError before the UI could launch. Define it explicitly.
article = ""


def inference(face, audio):
    """Run Wav2Lip lip-sync on an uploaded face video and audio track.

    Args:
        face: path to the uploaded talking-face video (mp4).
        audio: path to the uploaded audio file.

    Returns:
        Path to the generated lip-synced video written by inference.py.
    """
    # SECURITY FIX: the paths come from user uploads; quote them so a crafted
    # filename cannot inject extra shell commands into os.system().
    os.system("python inference.py --checkpoint_path ./wav2lip.pth --face {} --audio {}".format(
        shlex.quote(face), shlex.quote(audio)))
    # inference.py writes its output to this fixed location.
    return "./results/result_voice.mp4"


iface = gr.Interface(inference, inputs=[gr.inputs.Video(type="mp4", source="upload", label="Talking Face Video (in mp4 format)", optional=False), gr.inputs.Audio(source="upload", type="filepath", label="Audio", optional=False)], outputs=["video"], title=title, description=description, article=article, examples=[["./examples/w2l_test_f1.mp4", "./examples/w2l_test_a1.wav"]], enable_queue=True)
iface.launch()
|
app.py
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
import sys
import shlex  # used to safely quote user-supplied file paths in shell commands
import gradio as gr

# Fetch the Wav2Lip repository and the S3FD face-detection weights at startup,
# then flatten the repo into the working directory so inference.py is local.
os.system('git clone https://github.com/Rudrabha/Wav2Lip.git')
os.system('curl -o ./Wav2Lip/face_detection/detection/sfd/s3fd.pth https://www.adrianbulat.com/downloads/python-fan/s3fd-619a316812.pth')
os.system('mv ./Wav2Lip/* .')

title = "Text2Lip"
description = "Wav2Lip With Text"
# BUG FIX: `article` was passed to gr.Interface() below but never defined,
# which raised NameError before the UI could launch. Define it explicitly.
article = ""


def inference(face, audio):
    """Run Wav2Lip lip-sync on an uploaded face video and audio track.

    Args:
        face: path to the uploaded talking-face video (mp4).
        audio: path to the uploaded audio file.

    Returns:
        Path to the generated lip-synced video written by inference.py.
    """
    # SECURITY FIX: the paths come from user uploads; quote them so a crafted
    # filename cannot inject extra shell commands into os.system().
    os.system("python inference.py --checkpoint_path ./wav2lip.pth --face {} --audio {}".format(
        shlex.quote(face), shlex.quote(audio)))
    # inference.py writes its output to this fixed location.
    return "./results/result_voice.mp4"


iface = gr.Interface(inference, inputs=[gr.inputs.Video(type="mp4", source="upload", label="Talking Face Video (in mp4 format)", optional=False), gr.inputs.Audio(source="upload", type="filepath", label="Audio", optional=False)], outputs=["video"], title=title, description=description, article=article, examples=[["./examples/w2l_test_f1.mp4", "./examples/w2l_test_a1.wav"]], enable_queue=True)
iface.launch()
|
packages.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
ffmpeg
|
requirements.txt
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
librosa==0.7.0
|
2 |
+
numpy>=1.17.3
|
3 |
+
torch==1.4.0
|
4 |
+
torchvision==0.5.0
|
5 |
+
tqdm>=4.45.0
|
6 |
+
numba==0.48
|
7 |
+
opencv-contrib-python-headless==4.1.2.30
|
wav2lip.pth
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:b78b681b68ad9fe6c6fb1debc6ff43ad05834a8af8a62ffc4167b7b34ef63c37
|
3 |
+
size 435807851
|