Update app.py
app.py CHANGED
@@ -115,10 +115,85 @@ def main(args):
     mapping_checkpoint = os.path.join(current_root_path, args.checkpoint_dir, 'mapping_00229-model.pth.tar')
     facerender_yaml_path = os.path.join(current_root_path, 'src', 'config', 'facerender.yaml')
 
+    # preprocess_model = CropAndExtract(sadtalker_paths, device)
+    # init model
+    print(path_of_net_recon_model)
+    preprocess_model = CropAndExtract(path_of_lm_croper, path_of_net_recon_model, dir_of_BFM_fitting, device)
+
+    # audio_to_coeff = Audio2Coeff(sadtalker_paths, device)
+    audio_to_coeff = Audio2Coeff(audio2pose_checkpoint, audio2pose_yaml_path,
+                                 audio2exp_checkpoint, audio2exp_yaml_path,
+                                 wav2lip_checkpoint, device)
+    # animate_from_coeff = AnimateFromCoeff(sadtalker_paths, device)
+    animate_from_coeff = AnimateFromCoeff(free_view_checkpoint, mapping_checkpoint,
+                                          facerender_yaml_path, device)
+
+    first_frame_dir = os.path.join(save_dir, 'first_frame_dir')
+    os.makedirs(first_frame_dir, exist_ok=True)
+    # first_coeff_path, crop_pic_path, crop_info = preprocess_model.generate(pic_path, first_frame_dir, args.preprocess, \
+    #                                                                        source_image_flag=True, pic_size=args.size)
+    first_coeff_path, crop_pic_path, crop_info = preprocess_model.generate(pic_path, first_frame_dir, args.preprocess, source_image_flag=True)
+    print('first_coeff_path ', first_coeff_path)
+    print('crop_pic_path ', crop_pic_path)
+
+    if first_coeff_path is None:
+        print("Can't get the coeffs of the input")
+        return
+
+    if ref_eyeblink is not None:
+        ref_eyeblink_videoname = os.path.splitext(os.path.split(ref_eyeblink)[-1])[0]
+        ref_eyeblink_frame_dir = os.path.join(save_dir, ref_eyeblink_videoname)
+        os.makedirs(ref_eyeblink_frame_dir, exist_ok=True)
+        # ref_eyeblink_coeff_path, _, _ = preprocess_model.generate(ref_eyeblink, ref_eyeblink_frame_dir, args.preprocess, source_image_flag=False)
+        ref_eyeblink_coeff_path, _, _ = preprocess_model.generate(ref_eyeblink, ref_eyeblink_frame_dir)
+    else:
+        ref_eyeblink_coeff_path = None
+    print('ref_eyeblink_coeff_path', ref_eyeblink_coeff_path)
+
+    if ref_pose is not None:
+        if ref_pose == ref_eyeblink:
+            ref_pose_coeff_path = ref_eyeblink_coeff_path
+        else:
+            ref_pose_videoname = os.path.splitext(os.path.split(ref_pose)[-1])[0]
+            ref_pose_frame_dir = os.path.join(save_dir, ref_pose_videoname)
+            os.makedirs(ref_pose_frame_dir, exist_ok=True)
+            # ref_pose_coeff_path, _, _ = preprocess_model.generate(ref_pose, ref_pose_frame_dir, args.preprocess, source_image_flag=False)
+            ref_pose_coeff_path, _, _ = preprocess_model.generate(ref_pose, ref_pose_frame_dir)
+    else:
+        ref_pose_coeff_path = None
+    print('ref_pose_coeff_path', ref_pose_coeff_path)
+
+    batch = get_data(first_coeff_path, audio_path, device, ref_eyeblink_coeff_path, still=args.still)
+    coeff_path = audio_to_coeff.generate(batch, save_dir, pose_style, ref_pose_coeff_path)
+
+    if args.face3dvis:
+        from src.face3d.visualize import gen_composed_video
+        gen_composed_video(args, device, first_coeff_path, coeff_path, audio_path, os.path.join(save_dir, '3dface.mp4'))
+
+    # data = get_facerender_data(coeff_path, crop_pic_path, first_coeff_path, audio_path,
+    #                            batch_size, input_yaw_list, input_pitch_list, input_roll_list,
+    #                            expression_scale=args.expression_scale, still_mode=args.still, preprocess=args.preprocess, size=args.size)
+    data = get_facerender_data(coeff_path, crop_pic_path, first_coeff_path, audio_path,
+                               batch_size, input_yaw_list, input_pitch_list, input_roll_list,
+                               expression_scale=args.expression_scale, still_mode=args.still, preprocess=args.preprocess)
+
+    # result, base64_video, temp_file_path = animate_from_coeff.generate(data, save_dir, pic_path, crop_info, \
+    #     enhancer=args.enhancer, background_enhancer=args.background_enhancer, preprocess=args.preprocess, img_size=args.size)
+    result, base64_video, temp_file_path = animate_from_coeff.generate(data, save_dir, pic_path, crop_info, \
+        enhancer=args.enhancer, background_enhancer=args.background_enhancer, preprocess=args.preprocess)
+
+    face_path = temp_file_path
     audio_path = "/home/user/app/images/audio_1.mp3"  # Replace with the path to your audio file
-    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
-    temp_file_path = temp_file.name
+    # temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
+    # temp_file_path = temp_file.name
     output_path = temp_file_path
 
     # Call the function
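For orientation: the block un-commented above is the standard three-stage SadTalker flow (crop the portrait and extract 3DMM coefficients, map the audio to motion coefficients, then render the talking head). Below is a minimal standalone sketch of that call graph. The constructor and generate() signatures are copied from the diff; the import paths, the ckpt dict, and the default argument values are assumptions for illustration, not this app's exact wiring.

import os

# Assumed vendored SadTalker modules; these import paths are an assumption.
from src.utils.preprocess import CropAndExtract
from src.test_audio2coeff import Audio2Coeff
from src.generate_batch import get_data
from src.generate_facerender_batch import get_facerender_data
from src.facerender.animate import AnimateFromCoeff

def run_sadtalker(pic_path, audio_path, save_dir, ckpt, device='cuda'):
    # Stage 1: crop the source image and extract 3DMM coefficients.
    preprocess_model = CropAndExtract(ckpt['lm_croper'], ckpt['net_recon'],
                                      ckpt['bfm_dir'], device)
    first_frame_dir = os.path.join(save_dir, 'first_frame_dir')
    os.makedirs(first_frame_dir, exist_ok=True)
    first_coeff_path, crop_pic_path, crop_info = preprocess_model.generate(
        pic_path, first_frame_dir, 'crop', source_image_flag=True)
    if first_coeff_path is None:
        raise RuntimeError("Can't get the coeffs of the input")

    # Stage 2: predict pose/expression coefficients from the audio.
    audio_to_coeff = Audio2Coeff(ckpt['audio2pose'], ckpt['audio2pose_yaml'],
                                 ckpt['audio2exp'], ckpt['audio2exp_yaml'],
                                 ckpt['wav2lip'], device)
    batch = get_data(first_coeff_path, audio_path, device, None, still=True)
    coeff_path = audio_to_coeff.generate(batch, save_dir, 0, None)

    # Stage 3: render the talking-head video from the coefficients.
    animate_from_coeff = AnimateFromCoeff(ckpt['free_view'], ckpt['mapping'],
                                          ckpt['facerender_yaml'], device)
    data = get_facerender_data(coeff_path, crop_pic_path, first_coeff_path,
                               audio_path, 2, None, None, None,
                               expression_scale=1.0, still_mode=True,
                               preprocess='crop')
    # Returns (result_path, base64_video, temp_file_path), as in the diff.
    return animate_from_coeff.generate(data, save_dir, pic_path, crop_info,
                                       enhancer=None, background_enhancer=None,
                                       preprocess='crop')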
@@ -136,90 +211,16 @@ def main(args):
         LNet_batch_size=16,
         without_rl1=False
     )
-
-    # # preprocess_model = CropAndExtract(sadtalker_paths, device)
-    # #init model
-    # print(path_of_net_recon_model)
-    # preprocess_model = CropAndExtract(path_of_lm_croper, path_of_net_recon_model, dir_of_BFM_fitting, device)
-
-    # # audio_to_coeff = Audio2Coeff(sadtalker_paths, device)
-    # audio_to_coeff = Audio2Coeff(audio2pose_checkpoint, audio2pose_yaml_path,
-    #                              audio2exp_checkpoint, audio2exp_yaml_path,
-    #                              wav2lip_checkpoint, device)
-    # # animate_from_coeff = AnimateFromCoeff(sadtalker_paths, device)
-    # animate_from_coeff = AnimateFromCoeff(free_view_checkpoint, mapping_checkpoint,
-    #                                       facerender_yaml_path, device)
-
-    # first_frame_dir = os.path.join(save_dir, 'first_frame_dir')
-    # os.makedirs(first_frame_dir, exist_ok=True)
-    # # first_coeff_path, crop_pic_path, crop_info = preprocess_model.generate(pic_path, first_frame_dir, args.preprocess,\
-    # #                                                                        source_image_flag=True, pic_size=args.size)
-
-    # first_coeff_path, crop_pic_path, crop_info = preprocess_model.generate(pic_path, first_frame_dir, args.preprocess, source_image_flag=True)
-    # print('first_coeff_path ',first_coeff_path)
-    # print('crop_pic_path ',crop_pic_path)
-
-    # if first_coeff_path is None:
-    #     print("Can't get the coeffs of the input")
-    #     return
-
-    # if ref_eyeblink is not None:
-    #     ref_eyeblink_videoname = os.path.splitext(os.path.split(ref_eyeblink)[-1])[0]
-    #     ref_eyeblink_frame_dir = os.path.join(save_dir, ref_eyeblink_videoname)
-    #     os.makedirs(ref_eyeblink_frame_dir, exist_ok=True)
-    #     # ref_eyeblink_coeff_path, _, _ = preprocess_model.generate(ref_eyeblink, ref_eyeblink_frame_dir, args.preprocess, source_image_flag=False)
-    #     ref_eyeblink_coeff_path, _, _ = preprocess_model.generate(ref_eyeblink, ref_eyeblink_frame_dir)
-    # else:
-    #     ref_eyeblink_coeff_path=None
-    # print('ref_eyeblink_coeff_path',ref_eyeblink_coeff_path)
-
-    # if ref_pose is not None:
-    #     if ref_pose == ref_eyeblink:
-    #         ref_pose_coeff_path = ref_eyeblink_coeff_path
-    #     else:
-    #         ref_pose_videoname = os.path.splitext(os.path.split(ref_pose)[-1])[0]
-    #         ref_pose_frame_dir = os.path.join(save_dir, ref_pose_videoname)
-    #         os.makedirs(ref_pose_frame_dir, exist_ok=True)
-    #         # ref_pose_coeff_path, _, _ = preprocess_model.generate(ref_pose, ref_pose_frame_dir, args.preprocess, source_image_flag=False)
-    #         ref_pose_coeff_path, _, _ = preprocess_model.generate(ref_pose, ref_pose_frame_dir)
-    # else:
-    #     ref_pose_coeff_path=None
-    # print('ref_eyeblink_coeff_path',ref_pose_coeff_path)
-
-    # batch = get_data(first_coeff_path, audio_path, device, ref_eyeblink_coeff_path, still=args.still)
-    # coeff_path = audio_to_coeff.generate(batch, save_dir, pose_style, ref_pose_coeff_path)
-
-    # if args.face3dvis:
-    #     from src.face3d.visualize import gen_composed_video
-    #     gen_composed_video(args, device, first_coeff_path, coeff_path, audio_path, os.path.join(save_dir, '3dface.mp4'))
-
-    # # data = get_facerender_data(coeff_path, crop_pic_path, first_coeff_path, audio_path,
-    # #                            batch_size, input_yaw_list, input_pitch_list, input_roll_list,
-    # #                            expression_scale=args.expression_scale, still_mode=args.still, preprocess=args.preprocess, size=args.size)
-
-    # data = get_facerender_data(coeff_path, crop_pic_path, first_coeff_path, audio_path,
-    #                            batch_size, input_yaw_list, input_pitch_list, input_roll_list,
-    #                            expression_scale=args.expression_scale, still_mode=args.still, preprocess=args.preprocess)
-
-    # # result, base64_video,temp_file_path= animate_from_coeff.generate(data, save_dir, pic_path, crop_info, \
-    # #     enhancer=args.enhancer, background_enhancer=args.background_enhancer, preprocess=args.preprocess, img_size=args.size)
-
-    # result, base64_video,temp_file_path = animate_from_coeff.generate(data, save_dir, pic_path, crop_info, \
-    #     enhancer=args.enhancer, background_enhancer=args.background_enhancer, preprocess=args.preprocess)
 
+    print('The video is generated')
 
+    video_clip = mp.VideoFileClip(temp_file_path)
+    duration = video_clip.duration
 
+    app.config['temp_response'] = base64_video
+    app.config['final_video_path'] = temp_file_path
+    app.config['final_video_duration'] = duration
+    return base64_video, temp_file_path, duration
 
     # shutil.move(result, save_dir+'.mp4')
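The tail added in this hunk parks the render artifacts on the Flask app object so a later request can retrieve them. The sketch below shows how such a handler might read them back; only the app.config keys come from the diff, while the /video route, the handler name, and the response shape are hypothetical.

from flask import Flask, jsonify

app = Flask(__name__)  # in app.py this instance already exists

@app.route('/video', methods=['GET'])  # hypothetical route name
def fetch_generated_video():
    base64_video = app.config.get('temp_response')
    if base64_video is None:
        return jsonify({'error': 'no video has been generated yet'}), 404
    return jsonify({
        'video_base64': base64_video,                        # base64-encoded mp4
        'video_path': app.config.get('final_video_path'),    # temp .mp4 on disk
        'duration': app.config.get('final_video_duration'),  # seconds, float
    })

One caveat on the duration code itself: mp.VideoFileClip (moviepy, imported as mp elsewhere in app.py) keeps a file reader open, so on a long-lived Space it is worth calling video_clip.close() once the duration has been read.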