Update app_parallel.py
app_parallel.py  CHANGED  (+21 -21)
@@ -275,7 +275,7 @@ def split_audio(audio_path, chunk_duration):
 
 # Generator function to yield chunk results as they are processed
 def generate_chunks(audio_chunks, preprocessed_data, args):
-
+    future_to_chunk = {executor.submit(process_chunk, chunk[1], preprocessed_data, args): chunk[0] for chunk in audio_chunks}
 
     for future in as_completed(future_to_chunk):
         idx = future_to_chunk[future]  # Get the original chunk that was processed
@@ -352,27 +352,27 @@ def parallel_processing():
     audio_chunks = split_audio(driven_audio_path, chunk_duration=chunk_duration)
     print(f"Audio has been split into {len(audio_chunks)} chunks: {audio_chunks}")
 
-    future_to_chunk = {executor.submit(process_chunk, chunk[1], preprocessed_data, args): chunk[0] for chunk in audio_chunks}
-    return jsonify({"status": "processing started"}), 200
-
-
-
-
-
-
-@app.route("/stream", methods=["GET"])
-def stream_results():
-    global future_to_chunk
-    def generate():
-        for future in as_completed(future_to_chunk):
-            idx = future_to_chunk[future]
-            try:
-                base64_video, temp_file_path = future.result()
-                yield json.dumps({'start_time': idx, 'path': temp_file_path}).encode('utf-8')
-            except Exception as e:
-                yield json.dumps({'start_time': idx, 'error': str(e)}).encode('utf-8')
+    # future_to_chunk = {executor.submit(process_chunk, chunk[1], preprocessed_data, args): chunk[0] for chunk in audio_chunks}
+    # return jsonify({"status": "processing started"}), 200
+    try:
+        return stream_with_context(generate_chunks(audio_chunks, preprocessed_data, args))
+        # base64_video, temp_file_path, duration = process_chunk(driven_audio_path, preprocessed_data, args)
+    except Exception as e:
+        return jsonify({'status': 'error', 'message': str(e)}), 500
 
-    return stream_with_context(generate())
+    # @app.route("/stream", methods=["GET"])
+    # def stream_results():
+    #     global future_to_chunk
+    #     def generate():
+    #         for future in as_completed(future_to_chunk):
+    #             idx = future_to_chunk[future]
+    #             try:
+    #                 base64_video, temp_file_path = future.result()
+    #                 yield json.dumps({'start_time': idx, 'path': temp_file_path}).encode('utf-8')
+    #             except Exception as e:
+    #                 yield json.dumps({'start_time': idx, 'error': str(e)}).encode('utf-8')
+
+    # return stream_with_context(generate())
 
 @app.route("/health", methods=["GET"])
 def health_status():
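In short, this commit drops the separate /stream endpoint and the module-level future_to_chunk global: the chunk futures are now created inside generate_chunks() itself, and parallel_processing() streams that generator back to the caller via stream_with_context(). The sketch below is a minimal, self-contained reconstruction of that pattern, not the app's actual code; the /parallel route path, the ThreadPoolExecutor size, and the process_chunk stub are assumptions standing in for the rest of app_parallel.py.

# Minimal sketch of the streaming pattern this commit switches to (assumed
# names: the real app_parallel.py defines its own process_chunk, split_audio,
# args, and route path).
import json
import time
from concurrent.futures import ThreadPoolExecutor, as_completed

from flask import Flask, Response, jsonify, stream_with_context

app = Flask(__name__)
executor = ThreadPoolExecutor(max_workers=4)

def process_chunk(chunk_path, preprocessed_data, args):
    # Stand-in for the real per-chunk rendering; returns (base64_video, temp_file_path).
    time.sleep(0.1)
    return "ZmFrZQ==", f"/tmp/{chunk_path}.mp4"

def generate_chunks(audio_chunks, preprocessed_data, args):
    # Submit every chunk up front; yield each result as soon as its future completes.
    future_to_chunk = {
        executor.submit(process_chunk, chunk[1], preprocessed_data, args): chunk[0]
        for chunk in audio_chunks
    }
    for future in as_completed(future_to_chunk):
        idx = future_to_chunk[future]  # start time of the chunk that finished
        try:
            base64_video, temp_file_path = future.result()
            yield json.dumps({'start_time': idx, 'path': temp_file_path}).encode('utf-8')
        except Exception as e:
            yield json.dumps({'start_time': idx, 'error': str(e)}).encode('utf-8')

@app.route("/parallel", methods=["GET"])  # route path assumed
def parallel_processing():
    # (start_time, chunk_path) pairs; the real app gets these from split_audio().
    audio_chunks = [(0, "chunk_0.wav"), (5, "chunk_1.wav")]
    try:
        # The commit returns the generator directly (Flask 2.2+ accepts that);
        # wrapping it in Response() also works on older versions.
        return Response(stream_with_context(generate_chunks(audio_chunks, None, None)))
    except Exception as e:
        return jsonify({'status': 'error', 'message': str(e)}), 500

A client can consume this incrementally, for example with requests.get(url, stream=True) and iter_content(), handling each JSON payload as soon as its audio segment has been rendered instead of waiting for the whole job to finish.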