Spaces:
Paused
Paused
Update app.py
Browse files
app.py
CHANGED
@@ -32,7 +32,7 @@ import json
|
|
32 |
|
33 |
|
34 |
class AnimationConfig:
|
35 |
-
def __init__(self, driven_audio_path, source_image_path, result_folder,pose_style,expression_scale,enhancer,still,preprocess,ref_pose_video_path):
|
36 |
self.driven_audio = driven_audio_path
|
37 |
self.source_image = source_image_path
|
38 |
self.ref_eyeblink = None
|
@@ -64,6 +64,7 @@ class AnimationConfig:
|
|
64 |
self.z_near = 5.
|
65 |
self.z_far = 15.
|
66 |
self.device = 'cpu'
|
|
|
67 |
|
68 |
|
69 |
app = Flask(__name__)
|
@@ -93,6 +94,7 @@ def main(args):
|
|
93 |
ref_eyeblink = args.ref_eyeblink
|
94 |
ref_pose = args.ref_pose
|
95 |
preprocess = args.preprocess
|
|
|
96 |
|
97 |
dir_path = os.path.dirname(os.path.realpath(__file__))
|
98 |
current_root_path = dir_path
|
@@ -139,8 +141,30 @@ def main(args):
|
|
139 |
# first_coeff_path, crop_pic_path, crop_info = preprocess_model.generate(pic_path, first_frame_dir, args.preprocess,\
|
140 |
# source_image_flag=True, pic_size=args.size)
|
141 |
|
142 |
-
|
143 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
144 |
print('first_coeff_path ',first_coeff_path)
|
145 |
print('crop_pic_path ',crop_pic_path)
|
146 |
|
@@ -316,6 +340,20 @@ def ryzedb_chat_avatar(question):
|
|
316 |
return None
|
317 |
|
318 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
319 |
|
320 |
@app.route("/run", methods=['POST'])
|
321 |
def generate_video():
|
@@ -337,6 +375,7 @@ def generate_video():
|
|
337 |
return jsonify({'error': 'Input text prompt cannot be blank'}), 400
|
338 |
|
339 |
voice_cloning = request.form.get('voice_cloning', 'no')
|
|
|
340 |
chat_model_used = request.form.get('chat_model_used', 'openai')
|
341 |
target_language = request.form.get('target_language', 'original_text')
|
342 |
print('target_language',target_language)
|
@@ -458,7 +497,7 @@ def generate_video():
|
|
458 |
return "An error occurred", 500
|
459 |
|
460 |
# Example of using the class with some hypothetical paths
|
461 |
-
args = AnimationConfig(driven_audio_path=driven_audio_path, source_image_path=source_image_path, result_folder=result_folder, pose_style=pose_style, expression_scale=expression_scale,enhancer=enhancer,still=still,preprocess=preprocess,ref_pose_video_path=ref_pose_video_path)
|
462 |
|
463 |
if torch.cuda.is_available() and not args.cpu:
|
464 |
args.device = "cuda"
|
@@ -506,14 +545,12 @@ def check_generation_status():
|
|
506 |
os.remove(final_video_path)
|
507 |
print("Deleted video file:", final_video_path)
|
508 |
|
509 |
-
TEMP_DIR.cleanup()
|
510 |
-
|
511 |
-
|
512 |
-
|
513 |
-
|
514 |
-
|
515 |
-
# else:
|
516 |
-
# print("Temporary Directory is None or already cleaned up.")
|
517 |
end_time = time.time()
|
518 |
total_time = round(end_time - start_time, 2)
|
519 |
print("Total time taken for execution:", total_time, " seconds")
|
|
|
32 |
|
33 |
|
34 |
class AnimationConfig:
|
35 |
+
def __init__(self, driven_audio_path, source_image_path, result_folder,pose_style,expression_scale,enhancer,still,preprocess,ref_pose_video_path, image_hardcoded):
|
36 |
self.driven_audio = driven_audio_path
|
37 |
self.source_image = source_image_path
|
38 |
self.ref_eyeblink = None
|
|
|
64 |
self.z_near = 5.
|
65 |
self.z_far = 15.
|
66 |
self.device = 'cpu'
|
67 |
+
self.image_hardcoded = image_hardcoded
|
68 |
|
69 |
|
70 |
app = Flask(__name__)
|
|
|
94 |
ref_eyeblink = args.ref_eyeblink
|
95 |
ref_pose = args.ref_pose
|
96 |
preprocess = args.preprocess
|
97 |
+
image_hardcoded = args.image_hardcoded
|
98 |
|
99 |
dir_path = os.path.dirname(os.path.realpath(__file__))
|
100 |
current_root_path = dir_path
|
|
|
141 |
# first_coeff_path, crop_pic_path, crop_info = preprocess_model.generate(pic_path, first_frame_dir, args.preprocess,\
|
142 |
# source_image_flag=True, pic_size=args.size)
|
143 |
|
144 |
+
preprocess_dir = os.path.join(TEMP_DIR.name, "preprocess_data")
|
145 |
+
os.makedirs(preprocess_dir, exist_ok=True)
|
146 |
+
preprocessed_data_path = os.path.join(preprocess_dir, "preprocessed_data.pkl")
|
147 |
+
|
148 |
+
if os.path.exists(preprocessed_data_path) and image_hardcoded == "yes":
|
149 |
+
print("Loading preprocessed data...")
|
150 |
+
with open(preprocessed_data_path, "rb") as f:
|
151 |
+
preprocessed_data = pickle.load(f)
|
152 |
+
first_coeff_path = preprocessed_data["first_coeff_path"]
|
153 |
+
crop_pic_path = preprocessed_data["crop_pic_path"]
|
154 |
+
crop_info = preprocessed_data["crop_info"]
|
155 |
+
print(f"Loaded existing preprocessed data from: {preprocessed_data_path}")
|
156 |
+
|
157 |
+
else:
|
158 |
+
print("Running preprocessing...")
|
159 |
+
first_coeff_path, crop_pic_path, crop_info = preprocess_model.generate(pic_path, first_frame_dir, args.preprocess, source_image_flag=True)
|
160 |
+
preprocessed_data = {"first_coeff_path": first_coeff_path,
|
161 |
+
"crop_pic_path": crop_pic_path,
|
162 |
+
"crop_info": crop_info}
|
163 |
+
|
164 |
+
with open(preprocessed_data_path, "wb") as f:
|
165 |
+
pickle.dump(preprocessed_data, f)
|
166 |
+
print(f"Preprocessed data saved to: {preprocessed_data_path}")
|
167 |
+
|
168 |
print('first_coeff_path ',first_coeff_path)
|
169 |
print('crop_pic_path ',crop_pic_path)
|
170 |
|
|
|
340 |
return None
|
341 |
|
342 |
|
343 |
+
def custom_cleanup(temp_dir, exclude_dir):
    """Best-effort removal of everything directly inside *temp_dir* except *exclude_dir*.

    Used to flush per-request temporary artifacts while retaining the
    preprocessed-data cache directory between runs.

    Args:
        temp_dir: Path of the directory whose entries should be deleted.
        exclude_dir: Path of the single entry (file or directory) to keep.

    Failures to delete an individual entry are reported and skipped so one
    locked/busy file does not abort the rest of the cleanup.
    """
    # Normalize before comparing: a trailing slash or relative form of
    # exclude_dir must not silently defeat the exclusion.
    exclude = os.path.abspath(exclude_dir)
    for filename in os.listdir(temp_dir):
        file_path = os.path.join(temp_dir, filename)
        if os.path.abspath(file_path) == exclude:
            continue  # keep the excluded entry (e.g. preprocess_data)
        try:
            # islink check first: isdir() follows symlinks, and rmtree()
            # must not be handed a symlinked directory — unlink it instead.
            if os.path.isdir(file_path) and not os.path.islink(file_path):
                shutil.rmtree(file_path)
            else:
                os.remove(file_path)
            print(f"Deleted: {file_path}")
        except Exception as e:
            # Deliberately best-effort: log and continue with the rest.
            print(f"Failed to delete {file_path}. Reason: {e}")
|
357 |
|
358 |
@app.route("/run", methods=['POST'])
|
359 |
def generate_video():
|
|
|
375 |
return jsonify({'error': 'Input text prompt cannot be blank'}), 400
|
376 |
|
377 |
voice_cloning = request.form.get('voice_cloning', 'no')
|
378 |
+
image_hardcoded = request.form.get('image_hardcoded', 'yes')
|
379 |
chat_model_used = request.form.get('chat_model_used', 'openai')
|
380 |
target_language = request.form.get('target_language', 'original_text')
|
381 |
print('target_language',target_language)
|
|
|
497 |
return "An error occurred", 500
|
498 |
|
499 |
# Example of using the class with some hypothetical paths
|
500 |
+
args = AnimationConfig(driven_audio_path=driven_audio_path, source_image_path=source_image_path, result_folder=result_folder, pose_style=pose_style, expression_scale=expression_scale,enhancer=enhancer,still=still,preprocess=preprocess,ref_pose_video_path=ref_pose_video_path, image_hardcoded=image_hardcoded)
|
501 |
|
502 |
if torch.cuda.is_available() and not args.cpu:
|
503 |
args.device = "cuda"
|
|
|
545 |
os.remove(final_video_path)
|
546 |
print("Deleted video file:", final_video_path)
|
547 |
|
548 |
+
# TEMP_DIR.cleanup()
|
549 |
+
preprocess_dir = os.path.join(TEMP_DIR.name, "preprocess_data")
|
550 |
+
custom_cleanup(TEMP_DIR.name, preprocess_dir)
|
551 |
+
|
552 |
+
print("Temporary files cleaned up, but preprocess_data is retained.")
|
553 |
+
|
|
|
|
|
554 |
end_time = time.time()
|
555 |
total_time = round(end_time - start_time, 2)
|
556 |
print("Total time taken for execution:", total_time, " seconds")
|