Upload app_celery.py
app_celery.py
ADDED
@@ -0,0 +1,575 @@
from flask import Flask, request, jsonify
import torch
import shutil
import os
import sys
from argparse import ArgumentParser
from time import strftime
from argparse import Namespace
from src.utils.preprocess import CropAndExtract
from src.test_audio2coeff import Audio2Coeff
from src.facerender.animate import AnimateFromCoeff
from src.generate_batch import get_data
from src.generate_facerender_batch import get_facerender_data
# from src.utils.init_path import init_path
import tempfile
from openai import OpenAI, AsyncOpenAI
import threading
import elevenlabs
from elevenlabs import set_api_key, generate, play, clone, Voice, VoiceSettings
# from flask_cors import CORS, cross_origin
# from flask_swagger_ui import get_swaggerui_blueprint
import uuid
import time
from PIL import Image
import moviepy.editor as mp
import requests
import json
import pickle
from dotenv import load_dotenv
from celery import Celery
# from gevent import monkey
# monkey.patch_all()


# Load environment variables from .env file
load_dotenv()

class AnimationConfig:
    """Container that mimics the argparse namespace consumed by main()."""
    def __init__(self, driven_audio_path, source_image_path, result_folder, pose_style, expression_scale, enhancer, still, preprocess, ref_pose_video_path, image_hardcoded):
        self.driven_audio = driven_audio_path
        self.source_image = source_image_path
        self.ref_eyeblink = None
        self.ref_pose = ref_pose_video_path
        self.checkpoint_dir = './checkpoints'
        self.result_dir = result_folder
        self.pose_style = pose_style
        self.batch_size = 2
        self.expression_scale = expression_scale
        self.input_yaw = None
        self.input_pitch = None
        self.input_roll = None
        self.enhancer = enhancer
        self.background_enhancer = None
        self.cpu = False
        self.face3dvis = False
        self.still = still
        self.preprocess = preprocess
        self.verbose = False
        self.old_version = False
        self.net_recon = 'resnet50'
        self.init_path = None
        self.use_last_fc = False
        self.bfm_folder = './checkpoints/BFM_Fitting/'
        self.bfm_model = 'BFM_model_front.mat'
        self.focal = 1015.
        self.center = 112.
        self.camera_d = 10.
        self.z_near = 5.
        self.z_far = 15.
        self.device = 'cpu'
        self.image_hardcoded = image_hardcoded

app = Flask(__name__)
# CORS(app)
app.config['broker_url'] = 'redis://localhost:6379/0'
app.config['result_backend'] = 'redis://localhost:6379/0'

celery = Celery(app.name, broker=app.config['broker_url'])
celery.conf.update(app.config)

TEMP_DIR = None
start_time = None

app.config['temp_response'] = None
app.config['generation_thread'] = None
app.config['text_prompt'] = None
app.config['final_video_path'] = None
app.config['final_video_duration'] = None

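# How the pieces above fit together at runtime (a sketch; the module name
# "app_celery" and the default ports are assumptions, not stated in the upload):
#   redis-server                                         # broker and result backend
#   celery -A app_celery.celery worker --loglevel=info   # task worker process
#   python app_celery.py                                 # Flask API process
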
def main(args):
    print("Entered main function")
    pic_path = args.source_image
    audio_path = args.driven_audio
    save_dir = args.result_dir
    pose_style = args.pose_style
    device = args.device
    batch_size = args.batch_size
    input_yaw_list = args.input_yaw
    input_pitch_list = args.input_pitch
    input_roll_list = args.input_roll
    ref_eyeblink = args.ref_eyeblink
    ref_pose = args.ref_pose
    preprocess = args.preprocess
    image_hardcoded = args.image_hardcoded

    dir_path = os.path.dirname(os.path.realpath(__file__))
    current_root_path = dir_path
    print('current_root_path ', current_root_path)

    # sadtalker_paths = init_path(args.checkpoint_dir, os.path.join(current_root_path, 'src/config'), args.size, args.old_version, args.preprocess)

    # Note: the 'auido2*' spellings below match the checkpoint file names
    # shipped with SadTalker.
    path_of_lm_croper = os.path.join(current_root_path, args.checkpoint_dir, 'shape_predictor_68_face_landmarks.dat')
    path_of_net_recon_model = os.path.join(current_root_path, args.checkpoint_dir, 'epoch_20.pth')
    dir_of_BFM_fitting = os.path.join(current_root_path, args.checkpoint_dir, 'BFM_Fitting')
    wav2lip_checkpoint = os.path.join(current_root_path, args.checkpoint_dir, 'wav2lip.pth')

    audio2pose_checkpoint = os.path.join(current_root_path, args.checkpoint_dir, 'auido2pose_00140-model.pth')
    audio2pose_yaml_path = os.path.join(current_root_path, 'src', 'config', 'auido2pose.yaml')

    audio2exp_checkpoint = os.path.join(current_root_path, args.checkpoint_dir, 'auido2exp_00300-model.pth')
    audio2exp_yaml_path = os.path.join(current_root_path, 'src', 'config', 'auido2exp.yaml')

    free_view_checkpoint = os.path.join(current_root_path, args.checkpoint_dir, 'facevid2vid_00189-model.pth.tar')

    if preprocess == 'full':
        mapping_checkpoint = os.path.join(current_root_path, args.checkpoint_dir, 'mapping_00109-model.pth.tar')
        facerender_yaml_path = os.path.join(current_root_path, 'src', 'config', 'facerender_still.yaml')
    else:
        mapping_checkpoint = os.path.join(current_root_path, args.checkpoint_dir, 'mapping_00229-model.pth.tar')
        facerender_yaml_path = os.path.join(current_root_path, 'src', 'config', 'facerender.yaml')

    # init models
    # preprocess_model = CropAndExtract(sadtalker_paths, device)
    print(path_of_net_recon_model)
    preprocess_model = CropAndExtract(path_of_lm_croper, path_of_net_recon_model, dir_of_BFM_fitting, device)

    # audio_to_coeff = Audio2Coeff(sadtalker_paths, device)
    audio_to_coeff = Audio2Coeff(audio2pose_checkpoint, audio2pose_yaml_path,
                                 audio2exp_checkpoint, audio2exp_yaml_path,
                                 wav2lip_checkpoint, device)
    # animate_from_coeff = AnimateFromCoeff(sadtalker_paths, device)
    animate_from_coeff = AnimateFromCoeff(free_view_checkpoint, mapping_checkpoint,
                                          facerender_yaml_path, device)

    first_frame_dir = os.path.join(save_dir, 'first_frame_dir')
    os.makedirs(first_frame_dir, exist_ok=True)
    # first_coeff_path, crop_pic_path, crop_info = preprocess_model.generate(pic_path, first_frame_dir, args.preprocess,
    #                                                                        source_image_flag=True, pic_size=args.size)

    # Cache the expensive image preprocessing: when the source image is
    # hardcoded, later requests reuse the pickled coefficients instead of
    # re-running face detection and 3DMM fitting.
    fixed_temp_dir = "/tmp/preprocess_data"
    os.makedirs(fixed_temp_dir, exist_ok=True)
    preprocessed_data_path = os.path.join(fixed_temp_dir, "preprocessed_data.pkl")

    if os.path.exists(preprocessed_data_path) and image_hardcoded == "yes":
        print("Loading preprocessed data...")
        with open(preprocessed_data_path, "rb") as f:
            preprocessed_data = pickle.load(f)
        first_coeff_new_path = preprocessed_data["first_coeff_path"]
        crop_pic_new_path = preprocessed_data["crop_pic_path"]
        crop_info_path = preprocessed_data["crop_info_path"]
        with open(crop_info_path, "rb") as f:
            crop_info = pickle.load(f)

        print(f"Loaded existing preprocessed data from: {preprocessed_data_path}")

    else:
        print("Running preprocessing...")
        first_coeff_path, crop_pic_path, crop_info = preprocess_model.generate(pic_path, first_frame_dir, args.preprocess, source_image_flag=True)
        first_coeff_new_path = os.path.join(fixed_temp_dir, os.path.basename(first_coeff_path))
        crop_pic_new_path = os.path.join(fixed_temp_dir, os.path.basename(crop_pic_path))
        crop_info_new_path = os.path.join(fixed_temp_dir, "crop_info.pkl")
        shutil.move(first_coeff_path, first_coeff_new_path)
        shutil.move(crop_pic_path, crop_pic_new_path)

        with open(crop_info_new_path, "wb") as f:
            pickle.dump(crop_info, f)

        preprocessed_data = {"first_coeff_path": first_coeff_new_path,
                             "crop_pic_path": crop_pic_new_path,
                             "crop_info_path": crop_info_new_path}

        with open(preprocessed_data_path, "wb") as f:
            pickle.dump(preprocessed_data, f)
        print(f"Preprocessed data saved to: {preprocessed_data_path}")

    print('first_coeff_path ', first_coeff_new_path)
    print('crop_pic_path ', crop_pic_new_path)
    print('crop_info ', crop_info)

    if first_coeff_new_path is None:
        print("Can't get the coeffs of the input")
        return

    if ref_eyeblink is not None:
        ref_eyeblink_videoname = os.path.splitext(os.path.split(ref_eyeblink)[-1])[0]
        ref_eyeblink_frame_dir = os.path.join(save_dir, ref_eyeblink_videoname)
        os.makedirs(ref_eyeblink_frame_dir, exist_ok=True)
        # ref_eyeblink_coeff_path, _, _ = preprocess_model.generate(ref_eyeblink, ref_eyeblink_frame_dir, args.preprocess, source_image_flag=False)
        ref_eyeblink_coeff_path, _, _ = preprocess_model.generate(ref_eyeblink, ref_eyeblink_frame_dir)
    else:
        ref_eyeblink_coeff_path = None
    print('ref_eyeblink_coeff_path', ref_eyeblink_coeff_path)

    if ref_pose is not None:
        if ref_pose == ref_eyeblink:
            ref_pose_coeff_path = ref_eyeblink_coeff_path
        else:
            ref_pose_videoname = os.path.splitext(os.path.split(ref_pose)[-1])[0]
            ref_pose_frame_dir = os.path.join(save_dir, ref_pose_videoname)
            os.makedirs(ref_pose_frame_dir, exist_ok=True)
            # ref_pose_coeff_path, _, _ = preprocess_model.generate(ref_pose, ref_pose_frame_dir, args.preprocess, source_image_flag=False)
            ref_pose_coeff_path, _, _ = preprocess_model.generate(ref_pose, ref_pose_frame_dir)
    else:
        ref_pose_coeff_path = None
    print('ref_pose_coeff_path', ref_pose_coeff_path)

    batch = get_data(first_coeff_new_path, audio_path, device, ref_eyeblink_coeff_path, still=args.still)
    coeff_path = audio_to_coeff.generate(batch, save_dir, pose_style, ref_pose_coeff_path)

    if args.face3dvis:
        from src.face3d.visualize import gen_composed_video
        gen_composed_video(args, device, first_coeff_new_path, coeff_path, audio_path, os.path.join(save_dir, '3dface.mp4'))

    # data = get_facerender_data(coeff_path, crop_pic_path, first_coeff_path, audio_path,
    #                            batch_size, input_yaw_list, input_pitch_list, input_roll_list,
    #                            expression_scale=args.expression_scale, still_mode=args.still, preprocess=args.preprocess, size=args.size)

    data = get_facerender_data(coeff_path, crop_pic_new_path, first_coeff_new_path, audio_path,
                               batch_size, input_yaw_list, input_pitch_list, input_roll_list,
                               expression_scale=args.expression_scale, still_mode=args.still, preprocess=args.preprocess)

    # result, base64_video, temp_file_path = animate_from_coeff.generate(data, save_dir, pic_path, crop_info,
    #                                                                    enhancer=args.enhancer, background_enhancer=args.background_enhancer, preprocess=args.preprocess, img_size=args.size)

    result, base64_video, temp_file_path, new_audio_path = animate_from_coeff.generate(data, save_dir, pic_path, crop_info,
                                                                                       enhancer=args.enhancer, background_enhancer=args.background_enhancer, preprocess=args.preprocess)

    video_clip = mp.VideoFileClip(temp_file_path)
    duration = video_clip.duration

    app.config['temp_response'] = base64_video
    app.config['final_video_path'] = temp_file_path
    app.config['final_video_duration'] = duration

    return base64_video, temp_file_path, duration

def create_temp_dir():
    return tempfile.TemporaryDirectory()

def save_uploaded_file(file, filename, TEMP_DIR):
    print("Entered save_uploaded_file")
    unique_filename = str(uuid.uuid4()) + "_" + filename
    file_path = os.path.join(TEMP_DIR.name, unique_filename)
    file.save(file_path)
    return file_path

client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))

def openai_chat_avatar(text_prompt):
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "system", "content": "Answer using the minimum words you can ever use."},
                  {"role": "user", "content": f"Hi! I need help with something. Can you assist me with the following: {text_prompt}"},
                  ],
        max_tokens=len(text_prompt) + 300  # rough cap scaled to the input length (characters, not tokens)
        # temperature=0.3,
        # stop=["Translate:", "Text:"]
    )
    return response

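# Typical usage (a sketch): the generated reply text lives on the first
# choice of the returned chat completion object:
# reply = openai_chat_avatar("Summarize SadTalker").choices[0].message.content
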
def ryzedb_chat_avatar(question):
    url = "https://inference.dev.ryzeai.ai/chat/stream"
    question = question + ". Summarize and Answer using the minimum words you can ever use."
    payload = json.dumps({
        "input": {
            "chat_history": [],
            "app_id": os.getenv('RYZE_APP_ID'),
            "question": question
        },
        "config": {}
    })
    headers = {
        'Content-Type': 'application/json'
    }

    try:
        # Send the POST request
        response = requests.request("POST", url, headers=headers, data=payload)

        # Raise on HTTP error status codes
        response.raise_for_status()

        # Return the raw response body (a server-sent-event stream as text)
        return response.text

    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")
        return None

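# The /chat/stream endpoint replies as a server-sent-event stream; the parser
# in /run below expects event blocks shaped like this (an illustrative sample
# inferred from that parser, not a captured response):
#   event: data\r\n
#   data: {"content": "..."}\r\n\r\n
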
def custom_cleanup(temp_dir, exclude_dir):
    # Iterate over the files and directories in TEMP_DIR
    for filename in os.listdir(temp_dir):
        file_path = os.path.join(temp_dir, filename)
        # Skip the directory we want to exclude
        if file_path != exclude_dir:
            try:
                if os.path.isdir(file_path):
                    shutil.rmtree(file_path)
                else:
                    os.remove(file_path)
                print(f"Deleted: {file_path}")
            except Exception as e:
                print(f"Failed to delete {file_path}. Reason: {e}")

def generate_audio(voice_cloning, voice_gender, text_prompt):
    print("generate_audio")
    if voice_cloning == 'no':
        if voice_gender == 'male':
            voice = 'echo'
            print('Entering Audio creation using elevenlabs')
            # The API key was hardcoded in the upload; it should come from the
            # environment instead (ELEVENLABS_API_KEY is an assumed variable
            # name), and the leaked key should be rotated.
            set_api_key(os.getenv('ELEVENLABS_API_KEY'))

            audio = generate(text=text_prompt, voice="Daniel", model="eleven_multilingual_v2", stream=True, latency=4)
            with tempfile.NamedTemporaryFile(suffix=".mp3", prefix="text_to_speech_", dir=TEMP_DIR.name, delete=False) as temp_file:
                for chunk in audio:
                    temp_file.write(chunk)
                driven_audio_path = temp_file.name
            print('driven_audio_path', driven_audio_path)
            print('Audio file saved using elevenlabs')

        else:
            voice = 'nova'

            print('Entering Audio creation using OpenAI TTS')
            response = client.audio.speech.create(model="tts-1-hd",
                                                  voice=voice,
                                                  input=text_prompt)

            print('Audio created using OpenAI TTS')
            with tempfile.NamedTemporaryFile(suffix=".wav", prefix="text_to_speech_", dir=TEMP_DIR.name, delete=False) as temp_file:
                driven_audio_path = temp_file.name

            response.write_to_file(driven_audio_path)
            print('Audio file saved using OpenAI TTS')

    elif voice_cloning == 'yes':
        set_api_key(os.getenv('ELEVENLABS_API_KEY'))
        # voice = clone(name = "User Cloned Voice",
        #               files = [user_voice_path] )
        voice = Voice(voice_id="CEii8R8RxmB0zhAiloZg", name="Marc", settings=VoiceSettings(
            stability=0.71, similarity_boost=0.5, style=0.0, use_speaker_boost=True),)

        audio = generate(text=text_prompt, voice=voice, model="eleven_multilingual_v2", stream=True, latency=4)
        with tempfile.NamedTemporaryFile(suffix=".mp3", prefix="cloned_audio_", dir=TEMP_DIR.name, delete=False) as temp_file:
            for chunk in audio:
                temp_file.write(chunk)
            driven_audio_path = temp_file.name
        print('driven_audio_path', driven_audio_path)

    return driven_audio_path

def split_audio(audio_path, chunk_duration=5):
    audio_clip = mp.AudioFileClip(audio_path)
    total_duration = audio_clip.duration

    audio_chunks = []
    for start_time in range(0, int(total_duration), chunk_duration):
        end_time = min(start_time + chunk_duration, total_duration)
        chunk = audio_clip.subclip(start_time, end_time)
        with tempfile.NamedTemporaryFile(suffix=f"_chunk_{start_time}-{end_time}.wav", prefix="audio_chunk_", dir=TEMP_DIR.name, delete=False) as temp_file:
            chunk_path = temp_file.name
            chunk.write_audiofile(chunk_path)
            audio_chunks.append(chunk_path)

    return audio_chunks

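# Worked example: for a 12.4 s clip with chunk_duration=5, the loop yields
# three chunks covering [0, 5), [5, 10) and [10, 12.4) seconds; the final
# chunk is shorter because end_time is clamped to total_duration.
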
@celery.task
def process_video_for_chunk(audio_chunk_path, args_dict, chunk_index):
    print("Entered process_video_for_chunk")
    args = AnimationConfig(
        driven_audio_path=args_dict['driven_audio_path'],
        source_image_path=args_dict['source_image_path'],
        result_folder=args_dict['result_folder'],
        pose_style=args_dict['pose_style'],
        expression_scale=args_dict['expression_scale'],
        enhancer=args_dict['enhancer'],
        still=args_dict['still'],
        preprocess=args_dict['preprocess'],
        ref_pose_video_path=args_dict['ref_pose_video_path'],
        image_hardcoded=args_dict['image_hardcoded']
    )
    args.driven_audio = audio_chunk_path
    # Apply the device selected in /run: AnimationConfig defaults to 'cpu' and
    # its constructor does not accept the 'device' key carried in args_dict.
    args.device = args_dict.get('device', 'cpu')
    chunk_save_dir = os.path.join(args.result_dir, f"chunk_{chunk_index}")
    os.makedirs(chunk_save_dir, exist_ok=True)
    print("args", args)
    try:
        base64_video, video_chunk_path, duration = main(args)
        print(f"Main function returned: {video_chunk_path}, {duration}")
        return video_chunk_path
    except Exception as e:
        print(f"Error in process_video_for_chunk: {str(e)}")
        raise

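# The same fan-out can be expressed with a Celery group (a sketch, equivalent
# to the manual apply_async loop in /run below; shown for reference only):
# from celery import group
# job = group(process_video_for_chunk.s(chunk, args_dict, i)
#             for i, chunk in enumerate(audio_chunks))
# video_chunk_paths = job.apply_async().get()
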
@app.route("/run", methods=['POST'])
def generate_video():
    global start_time
    start_time = time.time()
    global TEMP_DIR
    TEMP_DIR = create_temp_dir()
    print('request:', request.method)
    try:
        if request.method == 'POST':
            # source_image = request.files['source_image']
            # NOTE: the source image is hardcoded to a local path for testing
            # instead of being read from the upload.
            image_path = 'C:/Users/fd01076/Downloads/marc.png'
            source_image = Image.open(image_path)
            text_prompt = request.form['text_prompt']

            print('Input text prompt: ', text_prompt)
            text_prompt = text_prompt.strip()
            if not text_prompt:
                return jsonify({'error': 'Input text prompt cannot be blank'}), 400

            voice_cloning = request.form.get('voice_cloning', 'yes')
            image_hardcoded = request.form.get('image_hardcoded', 'yes')
            chat_model_used = request.form.get('chat_model_used', 'openai')
            target_language = request.form.get('target_language', 'original_text')
            print('target_language', target_language)
            pose_style = int(request.form.get('pose_style', 1))
            expression_scale = float(request.form.get('expression_scale', 1))
            enhancer = request.form.get('enhancer', None)
            voice_gender = request.form.get('voice_gender', 'male')
            still_str = request.form.get('still', 'False')
            still = still_str.lower() == 'true'  # was `== 'false'`, which inverted the flag
            print('still', still)
            preprocess = request.form.get('preprocess', 'crop')
            print('preprocess selected: ', preprocess)
            ref_pose_video = request.files.get('ref_pose', None)

            if chat_model_used == 'ryzedb':
                response = ryzedb_chat_avatar(text_prompt)
                events = response.split('\r\n\r\n')
                content = None
                for event in events:
                    # Split each event block by "\r\n" to get the lines
                    lines = event.split('\r\n')
                    if len(lines) > 1 and lines[0] == 'event: data':
                        # Extract the JSON part from the second line and parse it
                        json_data = lines[1].replace('data: ', '')
                        try:
                            data = json.loads(json_data)
                            text_prompt = data.get('content')
                            app.config['text_prompt'] = text_prompt
                            print('Final output text prompt using ryzedb: ', text_prompt)
                            break  # Exit the loop once content is found
                        except json.JSONDecodeError:
                            continue

            else:
                # response = openai_chat_avatar(text_prompt)
                # text_prompt = response.choices[0].message.content.strip()
                app.config['text_prompt'] = text_prompt
                print('Final output text prompt using openai: ', text_prompt)

            source_image_path = save_uploaded_file(source_image, 'source_image.png', TEMP_DIR)
            print(source_image_path)

            driven_audio_path = generate_audio(voice_cloning, voice_gender, text_prompt)
            chunk_duration = 5
            print(f"Splitting the audio into {chunk_duration}-second chunks...")
            audio_chunks = split_audio(driven_audio_path, chunk_duration=chunk_duration)
            print(f"Audio has been split into {len(audio_chunks)} chunks: {audio_chunks}")

            save_dir = tempfile.mkdtemp(dir=TEMP_DIR.name)
            result_folder = os.path.join(save_dir, "results")
            os.makedirs(result_folder, exist_ok=True)

            ref_pose_video_path = None
            if ref_pose_video:
                with tempfile.NamedTemporaryFile(suffix=".mp4", prefix="ref_pose_", dir=TEMP_DIR.name, delete=False) as temp_file:
                    ref_pose_video_path = temp_file.name
                    ref_pose_video.save(ref_pose_video_path)
                print('ref_pose_video_path', ref_pose_video_path)

    except Exception as e:
        app.logger.error(f"An error occurred: {e}")
        return "An error occurred", 500

    # args = AnimationConfig(driven_audio_path=driven_audio_path, source_image_path=source_image_path, result_folder=result_folder, pose_style=pose_style, expression_scale=expression_scale, enhancer=enhancer, still=still, preprocess=preprocess, ref_pose_video_path=ref_pose_video_path, image_hardcoded=image_hardcoded)
    args_dict = {
        'driven_audio_path': driven_audio_path,
        'source_image_path': source_image_path,
        'result_folder': result_folder,
        'pose_style': pose_style,
        'expression_scale': expression_scale,
        'enhancer': enhancer,
        'still': still,
        'preprocess': preprocess,
        'ref_pose_video_path': ref_pose_video_path,
        'image_hardcoded': image_hardcoded,
        'device': 'cuda' if torch.cuda.is_available() else 'cpu'}

    # if torch.cuda.is_available() and not args.cpu:
    #     args.device = "cuda"
    # else:
    #     args.device = "cpu"

    try:
        # base64_video, temp_file_path, duration = main(args)
        # final_video_path = app.config['final_video_path']
        # print('final_video_path', final_video_path)
        chunk_tasks = []
        for index, audio_chunk in enumerate(audio_chunks):
            print(f"Submitting chunk {index} with audio_chunk: {audio_chunk}")
            task = process_video_for_chunk.apply_async(args=[audio_chunk, args_dict, index])
            print(f"Task {task.id} submitted for chunk {index}")
            chunk_tasks.append(task)
        print("chunk_tasks", chunk_tasks)

        # video_chunk_paths = [task.get() for task in chunk_tasks]
        # print(f"Video chunks generated: {video_chunk_paths}")
        video_chunk_paths = []
        for task in chunk_tasks:
            try:
                video_chunk_path = task.get()  # Wait for the task to complete
                video_chunk_paths.append(video_chunk_path)
            except Exception as e:
                print(f"Error while fetching task result: {str(e)}")
                return jsonify({'status': 'error', 'message': str(e)}), 500

        print(f"Video chunks generated: {video_chunk_paths}")
        preprocess_dir = os.path.join("/tmp", "preprocess_data")
        custom_cleanup(TEMP_DIR.name, preprocess_dir)

        print("Temporary files cleaned up, but preprocess_data is retained.")
        return jsonify({
            'status': 'completed',
            'video_chunk_paths': video_chunk_paths
        })

        # return jsonify({
        #     'base64_video': base64_video,
        #     'text_prompt': text_prompt,
        #     'duration': duration,
        #     'status': 'completed'
        # })

    except Exception as e:
        return jsonify({'status': 'error', 'message': str(e)}), 500

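# The endpoint above returns the per-chunk paths without joining them; a
# minimal stitching sketch with the already-imported moviepy (assuming all
# chunks share one resolution and frame rate):
# clips = [mp.VideoFileClip(p) for p in video_chunk_paths]
# mp.concatenate_videoclips(clips).write_videofile("final_video.mp4")
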

@app.route("/health", methods=["GET"])
def health_status():
    response = {"online": "true"}
    return jsonify(response)

if __name__ == '__main__':
    app.run(debug=True)
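# Example request (form fields mirror the request.form lookups in /run; the
# host and port assume the Flask development server defaults):
# curl -X POST http://127.0.0.1:5000/run \
#      -F "text_prompt=Hello there" \
#      -F "voice_cloning=no" \
#      -F "voice_gender=male" \
#      -F "chat_model_used=openai"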