Spanicin committed on
Commit
5972846
·
verified ·
1 Parent(s): 075ab37

Update app_parallel.py

Browse files
Files changed (1) hide show
  1. app_parallel.py +23 -7
app_parallel.py CHANGED
@@ -72,6 +72,7 @@ app = Flask(__name__)
72
 
73
  TEMP_DIR = None
74
  start_time = None
 
75
 
76
  app.config['temp_response'] = None
77
  app.config['generation_thread'] = None
@@ -273,7 +274,7 @@ def split_audio(audio_path, chunk_duration):
273
 
274
  # Generator function to yield chunk results as they are processed
275
  def generate_chunks(audio_chunks, preprocessed_data, args):
276
- future_to_chunk = {executor.submit(process_chunk, chunk[1], preprocessed_data, args): chunk[0] for chunk in audio_chunks}
277
 
278
  for future in as_completed(future_to_chunk):
279
  idx = future_to_chunk[future] # Get the original chunk that was processed
@@ -349,13 +350,28 @@ def parallel_processing():
349
  print(f"Splitting the audio into {chunk_duration}-second chunks...")
350
  audio_chunks = split_audio(driven_audio_path, chunk_duration=chunk_duration)
351
  print(f"Audio has been split into {len(audio_chunks)} chunks: {audio_chunks}")
352
-
353
- try:
354
- return stream_with_context(generate_chunks(audio_chunks, preprocessed_data, args))
355
- # base64_video, temp_file_path, duration = process_chunk(driven_audio_path, preprocessed_data, args)
356
- except Exception as e:
357
- return jsonify({'status': 'error', 'message': str(e)}), 500
358
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
359
 
360
  @app.route("/health", methods=["GET"])
361
  def health_status():
 
72
 
73
  TEMP_DIR = None
74
  start_time = None
75
+ future_to_chunk = {}
76
 
77
  app.config['temp_response'] = None
78
  app.config['generation_thread'] = None
 
274
 
275
  # Generator function to yield chunk results as they are processed
276
  def generate_chunks(audio_chunks, preprocessed_data, args):
277
+ # future_to_chunk = {executor.submit(process_chunk, chunk[1], preprocessed_data, args): chunk[0] for chunk in audio_chunks}
278
 
279
  for future in as_completed(future_to_chunk):
280
  idx = future_to_chunk[future] # Get the original chunk that was processed
 
350
  print(f"Splitting the audio into {chunk_duration}-second chunks...")
351
  audio_chunks = split_audio(driven_audio_path, chunk_duration=chunk_duration)
352
  print(f"Audio has been split into {len(audio_chunks)} chunks: {audio_chunks}")
 
 
 
 
 
 
353
 
354
+ future_to_chunk = {executor.submit(process_chunk, chunk[1], preprocessed_data, args): chunk[0] for chunk in audio_chunks}
355
+ return jsonify({"status": "processing started"}), 200
356
+ # try:
357
+ # return stream_with_context(generate_chunks(audio_chunks, preprocessed_data, args))
358
+ # # base64_video, temp_file_path, duration = process_chunk(driven_audio_path, preprocessed_data, args)
359
+ # except Exception as e:
360
+ # return jsonify({'status': 'error', 'message': str(e)}), 500
361
+
362
@app.route("/stream", methods=["GET"])
def stream_results():
    """Stream per-chunk processing results to the client as they complete.

    Reads the module-level ``future_to_chunk`` mapping (future -> chunk start
    index), which is populated by the ``/parallel_processing`` endpoint, and
    yields one JSON-encoded object per finished chunk.

    Returns:
        A streamed Flask response; each yielded item is a UTF-8 encoded JSON
        object with ``start_time`` plus either ``path`` (success) or
        ``error`` (failure message) for that chunk.

    NOTE(review): the original body iterated an undefined name
    ``future_results`` and used ``.index(future)`` (a list operation) — this
    raised NameError at runtime. Fixed to iterate the ``future_to_chunk``
    dict and look up the chunk index by key, matching ``generate_chunks``.
    """
    global future_to_chunk

    def generate():
        # as_completed yields futures in completion order, not submission
        # order, so results may arrive out of sequence; start_time lets the
        # client reassemble them.
        for future in as_completed(future_to_chunk):
            idx = future_to_chunk[future]  # start index of the chunk this future processed
            try:
                # Assumes process_chunk returns (base64_video, temp_file_path);
                # a commented-out call elsewhere also unpacked a duration —
                # TODO confirm the return arity.
                base64_video, temp_file_path = future.result()
                yield json.dumps({'start_time': idx, 'path': temp_file_path}).encode('utf-8')
            except Exception as e:
                # Report a per-chunk failure to the client rather than
                # aborting the whole stream.
                yield json.dumps({'start_time': idx, 'error': str(e)}).encode('utf-8')

    return stream_with_context(generate())
375
 
376
  @app.route("/health", methods=["GET"])
377
  def health_status():