Update app.py
app.py CHANGED

@@ -13,7 +13,6 @@ import tempfile
 import uuid
 import time
 import threading
-from queue import Queue
 
 torch.set_float32_matmul_precision("medium")
 
@@ -62,16 +61,10 @@ cleanup_thread = threading.Thread(target=cleanup_temp_files, daemon=True)
 cleanup_thread.start()
 
 
-# Function to process frames in a separate thread
-def process_frame(image, bg, fast_mode, result_queue):
-    processed_image = process(image, bg, fast_mode)
-    result_queue.put(processed_image)
-
-
 @spaces.GPU
 def fn(vid, bg_type="Color", bg_image=None, bg_video=None, color="#00FF00", fps=0, video_handling="slow_down", fast_mode=True):
     try:
-        start_time = time.time()
+        start_time = time.time()  # Start the timer
 
         # Load the video using moviepy
         video = mp.VideoFileClip(vid)
@@ -103,50 +96,29 @@ def fn(vid, bg_type="Color", bg_image=None, bg_video=None, color="#00FF00", fps=0, video_handling="slow_down", fast_mode=True):
 
         bg_frame_index = 0  # Initialize background frame index
 
-        threads = []
-        result_queue = Queue()
-        frame_batch_size = 4  # Process 4 frames at a time
-
         for i, frame in enumerate(frames):
             pil_image = Image.fromarray(frame)
             if bg_type == "Color":
-
+                processed_image = process(pil_image, color, fast_mode)
             elif bg_type == "Image":
-
+                processed_image = process(pil_image, bg_image, fast_mode)
             elif bg_type == "Video":
                 if video_handling == "slow_down":
                     background_frame = background_frames[bg_frame_index % len(background_frames)]
                     bg_frame_index += 1
                     background_image = Image.fromarray(background_frame)
+                    processed_image = process(pil_image, background_image, fast_mode)
                 else:  # video_handling == "loop"
                     background_frame = background_frames[bg_frame_index % len(background_frames)]
                     bg_frame_index += 1
                     background_image = Image.fromarray(background_frame)
+                    processed_image = process(pil_image, background_image, fast_mode)
            else:
-
-
-            # Start a new thread to process the frame
-            thread = threading.Thread(target=process_frame, args=(pil_image, background_image, fast_mode, result_queue))
-            threads.append(thread)
-            thread.start()
-
-            # If we have enough threads running or it's the last frame, wait for results
-            if len(threads) == frame_batch_size or i == len(list(frames)) - 1:
-                for thread in threads:
-                    thread.join()
-                while not result_queue.empty():
-                    processed_frames.append(np.array(result_queue.get()))
-                threads = []  # Reset the threads list
+                processed_image = pil_image  # Default to original image if no background is selected
 
+            processed_frames.append(np.array(processed_image))
             elapsed_time = time.time() - start_time
-
-            # Yield the first processed image from the current batch if available
-            if processed_frames:
-                index = -len(threads) if threads else -min(frame_batch_size, len(processed_frames))
-                yield processed_frames[index], None, f"Processing frame {i+1}... Elapsed time: {elapsed_time:.2f} seconds"
-            else:
-                yield None, None, f"Processing frame {i+1}... Elapsed time: {elapsed_time:.2f} seconds"
-
+            yield processed_image, None, f"Processing frame {i+1}... Elapsed time: {elapsed_time:.2f} seconds"
 
         # Create a new video from the processed frames
         processed_video = mp.ImageSequenceClip(processed_frames, fps=fps)
@@ -164,7 +136,7 @@ def fn(vid, bg_type="Color", bg_image=None, bg_video=None, color="#00FF00", fps=0, video_handling="slow_down", fast_mode=True):
         elapsed_time = time.time() - start_time
         yield gr.update(visible=False), gr.update(visible=True), f"Processing complete! Elapsed time: {elapsed_time:.2f} seconds"
         # Return the path to the temporary file
-        yield
+        yield processed_image, temp_filepath, f"Processing complete! Elapsed time: {elapsed_time:.2f} seconds"
 
     except Exception as e:
         print(f"Error: {e}")
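For context, the net effect of this commit is to drop the Queue/worker-thread batching and process every frame synchronously inside the generator, yielding each processed frame as a live preview. The sketch below is not the Space's code; it is a minimal stand-alone rendering of the loop shape the commit moves to. The name run_frames_synchronously and the process_fn parameter are illustrative stand-ins for this Space's process(image, bg, fast_mode) helper.

# Minimal sketch (not the Space's exact code) of the synchronous per-frame loop
# this commit moves to. `process_fn` stands in for the Space's process(image, bg, fast_mode).
import time
from typing import Callable, Iterable, Optional

import numpy as np
from PIL import Image


def run_frames_synchronously(
    frames: Iterable[np.ndarray],      # e.g. video.iter_frames() from moviepy
    process_fn: Callable,              # background-removal helper, assumed signature (image, bg, fast_mode)
    bg_type: str = "Color",
    color: str = "#00FF00",
    bg_image=None,
    background_frames: Optional[list] = None,
    fast_mode: bool = True,
):
    processed_frames = []              # accumulated in frame order, as fn() does for mp.ImageSequenceClip
    start_time = time.time()
    bg_frame_index = 0

    for i, frame in enumerate(frames):
        pil_image = Image.fromarray(frame)

        # Choose the background once per frame, mirroring the branches in fn().
        if bg_type == "Color":
            processed_image = process_fn(pil_image, color, fast_mode)
        elif bg_type == "Image":
            processed_image = process_fn(pil_image, bg_image, fast_mode)
        elif bg_type == "Video" and background_frames:
            # Both "slow_down" and "loop" handling index the background frames cyclically.
            background_image = Image.fromarray(background_frames[bg_frame_index % len(background_frames)])
            bg_frame_index += 1
            processed_image = process_fn(pil_image, background_image, fast_mode)
        else:
            processed_image = pil_image  # no background selected: keep the original frame

        processed_frames.append(np.array(processed_image))
        elapsed_time = time.time() - start_time
        # Stream a per-frame preview and status string, like the yields in fn().
        yield processed_image, None, f"Processing frame {i+1}... Elapsed time: {elapsed_time:.2f} seconds"

One practical difference from the removed thread-and-queue version: frames are now appended strictly in order, whereas draining a shared Queue after joining a batch of threads gave no ordering guarantee, so the reassembled clip could shuffle frames within a batch.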