Update app_parallel.py
app_parallel.py (+40 -41)
@@ -113,6 +113,7 @@ def process_chunk(audio_chunk, preprocessed_data, args):
     with open(crop_info_path , "rb") as f:
         crop_info = pickle.load(f)
 
+    print(f"Loaded existing preprocessed data")
     print("first_coeff_path",first_coeff_path)
     print("crop_pic_path",crop_pic_path)
     print("crop_info",crop_info)
@@ -157,20 +158,19 @@ def save_uploaded_file(file, filename,TEMP_DIR):
     return file_path
 
 
-def custom_cleanup(temp_dir
+def custom_cleanup(temp_dir):
     # Iterate over the files and directories in TEMP_DIR
     for filename in os.listdir(temp_dir):
         file_path = os.path.join(temp_dir, filename)
-        ...
-        print(f"Failed to delete {file_path}. Reason: {e}")
+        if os.path.isdir(file_path):
+            shutil.rmtree(file_path)
+        else:
+            os.remove(file_path)
+        print(f"Deleted: {file_path}")
+
+    torch.cuda.empty_cache()
+    import gc
+    gc.collect()
 
 
 def generate_audio(voice_cloning, voice_gender, text_prompt):
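Review note: the rewritten custom_cleanup no longer guards individual deletions, so one undeletable file raises out of the finally block in generate_chunks further down. The removed body (masked in this view) ended with a "Failed to delete" log line, which suggests each deletion used to be wrapped in try/except. A minimal sketch that keeps the new structure but restores that guard; catching OSError specifically is an assumption, not something visible in the diff:

    import gc
    import os
    import shutil

    import torch

    def custom_cleanup(temp_dir):
        # Remove every file and directory under temp_dir
        for filename in os.listdir(temp_dir):
            file_path = os.path.join(temp_dir, filename)
            try:
                if os.path.isdir(file_path):
                    shutil.rmtree(file_path)
                else:
                    os.remove(file_path)
                print(f"Deleted: {file_path}")
            except OSError as e:
                # Mirrors the log line visible in the removed version
                print(f"Failed to delete {file_path}. Reason: {e}")
        # Release cached GPU memory and force a GC pass between requests
        torch.cuda.empty_cache()
        gc.collect()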
@@ -225,8 +225,8 @@ def run_preprocessing(args):
     global path_of_lm_croper, path_of_net_recon_model, dir_of_BFM_fitting
     first_frame_dir = os.path.join(args.result_dir, 'first_frame_dir')
     os.makedirs(first_frame_dir, exist_ok=True)
-    fixed_temp_dir = "/
-    os.makedirs(fixed_temp_dir, exist_ok=True)
+    fixed_temp_dir = "/home/user/app/preprocess_data/"
+    # os.makedirs(fixed_temp_dir, exist_ok=True)
     preprocessed_data_path = os.path.join(fixed_temp_dir, "preprocessed_data.pkl")
 
     if os.path.exists(preprocessed_data_path) and args.image_hardcoded == "yes":
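Review note: with the os.makedirs call commented out, /home/user/app/preprocess_data/ has to exist in the Space image with preprocessed_data.pkl already inside. Since the fallback else: branch is deleted in the next hunk, a missing pickle now leaves preprocessed_data unbound at the return (unless it is rebuilt elsewhere outside this diff). If the directory may be absent, re-enabling the guard costs nothing:

    os.makedirs(fixed_temp_dir, exist_ok=True)  # no-op when the directory already exists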
@@ -234,29 +234,22 @@ def run_preprocessing(args):
         with open(preprocessed_data_path, "rb") as f:
             preprocessed_data = pickle.load(f)
         print("Loaded existing preprocessed data from:", preprocessed_data_path)
-    else:
-        print("Running preprocessing...")
-        preprocess_model = CropAndExtract(path_of_lm_croper, path_of_net_recon_model, dir_of_BFM_fitting, args.device)
-        first_coeff_path, crop_pic_path, crop_info = preprocess_model.generate(args.source_image, first_frame_dir, args.preprocess, source_image_flag=True)
-        first_coeff_new_path = os.path.join(fixed_temp_dir, os.path.basename(first_coeff_path))
-        crop_pic_new_path = os.path.join(fixed_temp_dir, os.path.basename(crop_pic_path))
-        crop_info_new_path = os.path.join(fixed_temp_dir, "crop_info.pkl")
-        shutil.move(first_coeff_path, first_coeff_new_path)
-        shutil.move(crop_pic_path, crop_pic_new_path)
-
-        with open(crop_info_new_path, "wb") as f:
-            pickle.dump(crop_info, f)
-
-        preprocessed_data = {"first_coeff_path": first_coeff_new_path,
-                             "crop_pic_path": crop_pic_new_path,
-                             "crop_info": crop_info_new_path}
-
-
-        with open(preprocessed_data_path, "wb") as f:
-            pickle.dump(preprocessed_data, f)
-        print(f"Preprocessed data saved to: {preprocessed_data_path}")
 
     return preprocessed_data
+
+client = OpenAI(api_key="sk-proj-...")
+
+def openai_chat_avatar(text_prompt):
+    response = client.chat.completions.create(
+        model="gpt-4o-mini",
+        messages=[{"role": "system", "content": "Ensure answers are concise, human-like, and clear while maintaining quality. Use the fewest possible words, avoiding unnecessary articles, prepositions, and adjectives. Responses should be short but still address the question thoroughly without being verbose. Keep them to one sentence only"},
+                  {"role": "user", "content": f"Hi! I need help with something. {text_prompt}"},
+        ],
+        max_tokens=len(text_prompt) + 300  # Use the length of the input text
+        # temperature=0.3,
+        # stop=["Translate:", "Text:"]
+    )
+    return response
 
 def split_audio(audio_path, chunk_duration):
     audio_clip = mp.AudioFileClip(audio_path)
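Review note: the commit inlines a live sk-proj-... API key (redacted above); on a public Space it belongs in a secret. A sketch of the same helper with the key read from the environment, followed by the call pattern the commented-out lines in the last hunk use; the OPENAI_API_KEY variable name and the shortened system prompt are assumptions:

    import os
    from openai import OpenAI

    client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])  # set as a Space secret

    def openai_chat_avatar(text_prompt):
        # Same request shape as the commit, minus the inlined key
        return client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": "Answer concisely and clearly, one sentence only."},
                {"role": "user", "content": f"Hi! I need help with something. {text_prompt}"},
            ],
            max_tokens=len(text_prompt) + 300,  # character count as a rough cap, not tokens
        )

    response = openai_chat_avatar("What can you do?")
    print(response.choices[0].message.content.strip())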
@@ -277,13 +270,17 @@ def split_audio(audio_path, chunk_duration):
 def generate_chunks(audio_chunks, preprocessed_data, args):
     future_to_chunk = {executor.submit(process_chunk, chunk[1], preprocessed_data, args): chunk[0] for chunk in audio_chunks}
 
-    ...
+    try:
+        for future in as_completed(future_to_chunk):
+            idx = future_to_chunk[future]  # Get the original chunk that was processed
+            try:
+                base64_video, temp_file_path = future.result()  # Get the result of the completed task
+                yield json.dumps({'start_time': idx, 'base64_video': base64_video}).encode('utf-8')
+            except Exception as e:
+                yield f"Task for chunk {idx} failed: {e}\n"
+    finally:
+        if TEMP_DIR:
+            custom_cleanup(TEMP_DIR.name)
 
 @app.route("/run", methods=['POST'])
 def parallel_processing():
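Review note: generate_chunks is a generator, yielding one JSON payload per finished chunk, so /run presumably returns it wrapped in a streaming Response. (Note the error path yields plain text into the same stream as the JSON payloads, which the client has to tolerate.) The route body is outside this diff; the self-contained sketch below shows the usual Flask streaming pattern with a stand-in generator:

    from flask import Flask, Response

    app = Flask(__name__)

    def fake_chunks():
        # Stand-in for generate_chunks(...); any iterable of bytes/str works
        yield b'{"start_time": 0, "base64_video": "..."}'
        yield b'{"start_time": 5, "base64_video": "..."}'

    @app.route("/run", methods=['POST'])
    def parallel_processing():
        # Flask streams the generator, flushing each yielded payload to the client
        return Response(fake_chunks(), mimetype='application/json')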
@@ -321,6 +318,8 @@ def parallel_processing():
     print('preprocess selected: ',preprocess)
     # ref_pose_video = request.files.get('ref_pose', None)
 
+    # response = openai_chat_avatar(text_prompt)
+    # text_prompt = response.choices[0].message.content.strip()
     app.config['text_prompt'] = text_prompt
     print('Final output text prompt using openai: ',text_prompt)
 