Update app.py
app.py CHANGED
@@ -1,776 +1,3 @@
-# import logging
-# import random
-# import warnings
-# import gradio as gr
-# import os
-# import shutil
-# import subprocess
-# import spaces
-# import torch
-# import numpy as np
-# from diffusers import FluxControlNetModel
-# from diffusers.pipelines import FluxControlNetPipeline
-# from PIL import Image
-# from huggingface_hub import snapshot_download, login
-# import io
-# import base64
-# from flask import Flask, request, jsonify
-# from concurrent.futures import ThreadPoolExecutor
-# from flask_cors import CORS
-# import threading
-
-# # Configure logging
-# logging.basicConfig(level=logging.INFO)
-# logger = logging.getLogger(__name__)
-
-# app = Flask(__name__)
-# CORS(app)
-
-# # Function to check disk usage
-# def check_disk_space():
-#     result = subprocess.run(['df', '-h'], capture_output=True, text=True)
-#     logger.info("Disk space usage:\n%s", result.stdout)
-
-# # Function to clear Hugging Face cache
-# def clear_huggingface_cache():
-#     cache_dir = os.path.expanduser('~/.cache/huggingface')
-#     if os.path.exists(cache_dir):
-#         shutil.rmtree(cache_dir) # Removes the entire cache directory
-#         logger.info("Cleared Hugging Face cache at: %s", cache_dir)
-#     else:
-#         logger.info("No Hugging Face cache found.")
-
-# # Check disk space
-# check_disk_space()
-
-# # Clear Hugging Face cache
-# clear_huggingface_cache()
-
-# # Add config to store base64 images
-# app.config['image_outputs'] = {}
-
-# # ThreadPoolExecutor for managing image processing threads
-# executor = ThreadPoolExecutor()
-
-# # Determine the device (GPU or CPU)
-# if torch.cuda.is_available():
-#     device = "cuda"
-#     logger.info("CUDA is available. Using GPU.")
-# else:
-#     device = "cpu"
-#     logger.info("CUDA is not available. Using CPU.")
-
-# # Load model from Huggingface Hub
-# huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
-# if huggingface_token:
-#     login(token=huggingface_token)
-#     logger.info("Hugging Face token found and logged in.")
-# else:
-#     logger.warning("Hugging Face token not found in environment variables.")
-
-# logger.info("Hugging Face token: %s", huggingface_token)
-
-# # Download model using snapshot_download
-
-# model_path = snapshot_download(
-#     repo_id="black-forest-labs/FLUX.1-dev",
-#     repo_type="model",
-#     ignore_patterns=["*.md", "*..gitattributes"],
-#     local_dir="FLUX.1-dev",
-#     token=huggingface_token)
-# logger.info("Model downloaded to: %s", model_path)
-
-# # Load pipeline
-# logger.info('Loading ControlNet model.')
-
-# controlnet = FluxControlNetModel.from_pretrained(
-#     "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
-# ).to(device)
-# logger.info("ControlNet model loaded successfully.")
-
-# logger.info('Loading pipeline.')
-
-# pipe = FluxControlNetPipeline.from_pretrained(
-#     model_path, controlnet=controlnet, torch_dtype=torch.bfloat16
-# ).to(device)
-# logger.info("Pipeline loaded successfully.")
-
-# MAX_SEED = 1000000
-# MAX_PIXEL_BUDGET = 1024 * 1024
-
-
-# @spaces.GPU
-# def process_input(input_image, upscale_factor):
-#     w, h = input_image.size
-#     aspect_ratio = w / h
-#     was_resized = False
-
-#     # Resize if input size exceeds the maximum pixel budget
-#     if w * h * upscale_factor**2 > MAX_PIXEL_BUDGET:
-#         warnings.warn(f"Requested output image is too large. Resizing to fit within pixel budget.")
-#         input_image = input_image.resize(
-#             (
-#                 int(aspect_ratio * MAX_PIXEL_BUDGET**0.5 // upscale_factor),
-#                 int(MAX_PIXEL_BUDGET**0.5 // aspect_ratio // upscale_factor),
-#             )
-#         )
-#         was_resized = True
-
-#     # Adjust dimensions to be a multiple of 8
-#     w, h = input_image.size
-#     w = w - w % 8
-#     h = h - h % 8
-
-#     return input_image.resize((w, h)), was_resized
-
-# @spaces.GPU
-# def run_inference(process_id, input_image, upscale_factor, seed, num_inference_steps, controlnet_conditioning_scale):
-#     logger.info("Processing inference for process_id: %s", process_id)
-#     input_image, was_resized = process_input(input_image, upscale_factor)
-
-#     # Rescale image for ControlNet processing
-#     w, h = input_image.size
-#     control_image = input_image.resize((w * upscale_factor, h * upscale_factor))
-
-#     # Set the random generator for inference
-#     generator = torch.Generator().manual_seed(seed)
-
-#     # Perform inference using the pipeline
-#     logger.info("Running pipeline for process_id: %s", process_id)
-#     image = pipe(
-#         prompt="",
-#         control_image=control_image,
-#         controlnet_conditioning_scale=controlnet_conditioning_scale,
-#         num_inference_steps=num_inference_steps,
-#         guidance_scale=3.5,
-#         height=control_image.size[1],
-#         width=control_image.size[0],
-#         generator=generator,
-#     ).images[0]
-
-#     # Resize output image back to the original dimensions if needed
-#     if was_resized:
-#         original_size = (input_image.width * upscale_factor, input_image.height * upscale_factor)
-#         image = image.resize(original_size)
-
-#     # Convert the output image to base64
-#     buffered = io.BytesIO()
-#     image.save(buffered, format="JPEG")
-#     image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
-
-#     # Store the result in the shared dictionary
-#     app.config['image_outputs'][process_id] = image_base64
-#     logger.info("Inference completed for process_id: %s", process_id)
-
-# @app.route('/infer', methods=['POST'])
-# def infer():
-#     # Check if the file was provided in the form-data
-#     if 'input_image' not in request.files:
-#         logger.error("No image file provided in request.")
-#         return jsonify({
-#             "status": "error",
-#             "message": "No input_image file provided"
-#         }), 400
-
-#     # Get the uploaded image file from the request
-#     file = request.files['input_image']
-
-#     # Check if a file was uploaded
-#     if file.filename == '':
-#         logger.error("No selected file in form-data.")
-#         return jsonify({
-#             "status": "error",
-#             "message": "No selected file"
-#         }), 400
-
-#     # Convert the image to Base64 for internal processing
-#     input_image = Image.open(file)
-#     buffered = io.BytesIO()
-#     input_image.save(buffered, format="JPEG")
-
-#     # Retrieve additional parameters from the request (if any)
-#     seed = request.form.get("seed", 42, type=int)
-#     randomize_seed = request.form.get("randomize_seed", 'true').lower() == 'true'
-#     num_inference_steps = request.form.get("num_inference_steps", 28, type=int)
-#     upscale_factor = request.form.get("upscale_factor", 4, type=int)
-#     controlnet_conditioning_scale = request.form.get("controlnet_conditioning_scale", 0.6, type=float)
-
-#     # Randomize seed if specified
-#     if randomize_seed:
-#         seed = random.randint(0, MAX_SEED)
-#         logger.info("Seed randomized to: %d", seed)
-
-#     # Create a unique process ID for this request
-#     process_id = str(random.randint(1000, 9999))
-#     logger.info("Process started with process_id: %s", process_id)
-
-#     # Set the status to 'in_progress'
-#     app.config['image_outputs'][process_id] = None
-
-#     # Run the inference in a separate thread
-#     executor.submit(run_inference, process_id, input_image, upscale_factor, seed, num_inference_steps, controlnet_conditioning_scale)
-
-#     # Return the process ID
-#     return jsonify({
-#         "process_id": process_id,
-#         "message": "Processing started"
-#     })
-
-
-# # Modify status endpoint to receive process_id in request body
-# @app.route('/status', methods=['GET'])
-# def status():
-#     # Get the process_id from the query parameters
-#     process_id = request.args.get('process_id')
-
-#     # Check if process_id was provided
-#     if not process_id:
-#         logger.error("Process ID not provided in request.")
-#         return jsonify({
-#             "status": "error",
-#             "message": "Process ID is required"
-#         }), 400
-
-#     # Check if the process_id exists in the dictionary
-#     if process_id not in app.config['image_outputs']:
-#         logger.error("Invalid process ID: %s", process_id)
-#         return jsonify({
-#             "status": "error",
-#             "message": "Invalid process ID"
-#         }), 404
-
-#     # Check the status of the image processing
-#     image_base64 = app.config['image_outputs'][process_id]
-#     if image_base64 is None:
-#         logger.info("Process ID %s is still in progress.", process_id)
-#         return jsonify({
-#             "status": "in_progress"
-#         })
-#     else:
-#         logger.info("Process ID %s completed successfully.", process_id)
-#         return jsonify({
-#             "status": "completed",
-#             "output_image": image_base64
-#         })
-
-
-# if __name__ == '__main__':
-#     app.run(debug=True,host='0.0.0.0')
-
-
-
-
-
-# import logging
-# import random
-# import warnings
-# import gradio as gr
-# import os
-# import shutil
-# import subprocess
-# import torch
-# import numpy as np
-# from diffusers import FluxControlNetModel
-# from diffusers.pipelines import FluxControlNetPipeline
-# from PIL import Image
-# from huggingface_hub import snapshot_download, login
-# import io
-# import base64
-# import threading
-
-# # Configure logging
-# logging.basicConfig(level=logging.INFO)
-# logger = logging.getLogger(__name__)
-
-# # Function to check disk usage
-# def check_disk_space():
-#     result = subprocess.run(['df', '-h'], capture_output=True, text=True)
-#     logger.info("Disk space usage:\n%s", result.stdout)
-
-# # Function to clear Hugging Face cache
-# def clear_huggingface_cache():
-#     cache_dir = os.path.expanduser('~/.cache/huggingface')
-#     if os.path.exists(cache_dir):
-#         shutil.rmtree(cache_dir) # Removes the entire cache directory
-#         logger.info("Cleared Hugging Face cache at: %s", cache_dir)
-#     else:
-#         logger.info("No Hugging Face cache found.")
-
-# # Check disk space
-# check_disk_space()
-
-# # Clear Hugging Face cache
-# clear_huggingface_cache()
-
-# # Add config to store base64 images
-# image_outputs = {}
-
-# # Determine the device (GPU or CPU)
-# if torch.cuda.is_available():
-#     device = "cuda"
-#     logger.info("CUDA is available. Using GPU.")
-# else:
-#     device = "cpu"
-#     logger.info("CUDA is not available. Using CPU.")
-
-# # Load model from Huggingface Hub
-# huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
-# if huggingface_token:
-#     login(token=huggingface_token)
-#     logger.info("Hugging Face token found and logged in.")
-# else:
-#     logger.warning("Hugging Face token not found in environment variables.")
-
-# # Download model using snapshot_download
-# model_path = snapshot_download(
-#     repo_id="black-forest-labs/FLUX.1-dev",
-#     repo_type="model",
-#     ignore_patterns=["*.md", "*..gitattributes"],
-#     local_dir="FLUX.1-dev",
-#     token=huggingface_token
-# )
-# logger.info("Model downloaded to: %s", model_path)
-
-# # Load pipeline
-# logger.info('Loading ControlNet model.')
-# controlnet = FluxControlNetModel.from_pretrained(
-#     "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
-# ).to(device)
-# logger.info("ControlNet model loaded successfully.")
-
-# logger.info('Loading pipeline.')
-# pipe = FluxControlNetPipeline.from_pretrained(
-#     model_path, controlnet=controlnet, torch_dtype=torch.bfloat16
-# ).to(device)
-# logger.info("Pipeline loaded successfully.")
-
-# MAX_SEED = 1000000
-# MAX_PIXEL_BUDGET = 1024 * 1024
-
-# def process_input(input_image, upscale_factor):
-#     w, h = input_image.size
-#     aspect_ratio = w / h
-#     was_resized = False
-
-#     # Resize if input size exceeds the maximum pixel budget
-#     if w * h * upscale_factor**2 > MAX_PIXEL_BUDGET:
-#         warnings.warn(f"Requested output image is too large. Resizing to fit within pixel budget.")
-#         input_image = input_image.resize(
-#             (
-#                 int(aspect_ratio * MAX_PIXEL_BUDGET**0.5 // upscale_factor),
-#                 int(MAX_PIXEL_BUDGET**0.5 // aspect_ratio // upscale_factor),
-#             )
-#         )
-#         was_resized = True
-
-#     # Adjust dimensions to be a multiple of 8
-#     w, h = input_image.size
-#     w = w - w % 8
-#     h = h - h % 8
-
-#     return input_image.resize((w, h)), was_resized
-
-# def run_inference(input_image, upscale_factor, seed, num_inference_steps, controlnet_conditioning_scale):
-#     logger.info("Running inference")
-#     input_image, was_resized = process_input(input_image, upscale_factor)
-
-#     # Rescale image for ControlNet processing
-#     w, h = input_image.size
-#     control_image = input_image.resize((w * upscale_factor, h * upscale_factor))
-
-#     # Set the random generator for inference
-#     generator = torch.Generator().manual_seed(seed)
-
-#     # Perform inference using the pipeline
-#     logger.info("Running pipeline")
-#     image = pipe(
-#         prompt="",
-#         control_image=control_image,
-#         controlnet_conditioning_scale=controlnet_conditioning_scale,
-#         num_inference_steps=num_inference_steps,
-#         guidance_scale=3.5,
-#         height=control_image.size[1],
-#         width=control_image.size[0],
-#         generator=generator,
-#     ).images[0]
-
-#     # Resize output image back to the original dimensions if needed
-#     if was_resized:
-#         original_size = (input_image.width * upscale_factor, input_image.height * upscale_factor)
-#         image = image.resize(original_size)
-
-#     # Convert the output image to base64
-#     buffered = io.BytesIO()
-#     image.save(buffered, format="JPEG")
-#     image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
-
-#     logger.info("Inference completed")
-#     return image_base64
-
-# # Define Gradio interface
-# def gradio_interface(input_image, upscale_factor=4, seed=42, num_inference_steps=28, controlnet_conditioning_scale=0.6):
-#     if randomize_seed:
-#         seed = random.randint(0, MAX_SEED)
-#         logger.info("Seed randomized to: %d", seed)
-
-#     # Run inference
-#     output_image_base64 = run_inference(input_image, upscale_factor, seed, num_inference_steps, controlnet_conditioning_scale)
-
-#     return Image.open(io.BytesIO(base64.b64decode(output_image_base64)))
-
-# # Create Gradio interface
-# iface = gr.Interface(
-#     fn=gradio_interface,
-#     inputs=[
-#         gr.Image(type="pil", label="Input Image"),
-#         gr.Slider(min=1, max=8, step=1, label="Upscale Factor"),
-#         gr.Slider(min=0, max=MAX_SEED, step=1, label="Seed"),
-#         gr.Slider(min=1, max=100, step=1, label="Inference Steps"),
-#         gr.Slider(min=0.0, max=1.0, step=0.1, label="ControlNet Conditioning Scale")
-#     ],
-#     outputs=gr.Image(label="Output Image"),
-#     title="ControlNet Image Upscaling",
-#     description="Upload an image to upscale using the ControlNet model."
-# )
-
-# # Launch Gradio app
-# if __name__ == '__main__':
-#     iface.launch()
-
-
-# import logging
-# import random
-# import warnings
-# import gradio as gr
-# import os
-# import shutil
-# import spaces
-# import subprocess
-# import torch
-# import numpy as np
-# from diffusers import FluxControlNetModel
-# from diffusers.pipelines import FluxControlNetPipeline
-# from PIL import Image
-# from huggingface_hub import snapshot_download, login
-# import io
-# import base64
-# from concurrent.futures import ThreadPoolExecutor
-
-# # Configure logging
-# logging.basicConfig(level=logging.INFO)
-# logger = logging.getLogger(__name__)
-
-# # ThreadPoolExecutor for managing image processing threads
-# executor = ThreadPoolExecutor()
-
-# # Determine the device (GPU or CPU)
-# if torch.cuda.is_available():
-#     device = "cuda"
-#     logger.info("CUDA is available. Using GPU.")
-# else:
-#     device = "cpu"
-#     logger.info("CUDA is not available. Using CPU.")
-
-# # Load model from Huggingface Hub
-# huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
-# if huggingface_token:
-#     login(token=huggingface_token)
-#     logger.info("Hugging Face token found and logged in.")
-# else:
-#     logger.warning("Hugging Face token not found in environment variables.")
-
-# # Download model using snapshot_download
-# model_path = snapshot_download(
-#     repo_id="black-forest-labs/FLUX.1-dev",
-#     repo_type="model",
-#     ignore_patterns=["*.md", "*..gitattributes"],
-#     local_dir="FLUX.1-dev",
-#     token=huggingface_token
-# )
-# logger.info("Model downloaded to: %s", model_path)
-
-# # Load pipeline
-# logger.info('Loading ControlNet model.')
-# controlnet = FluxControlNetModel.from_pretrained(
-#     "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
-# ).to(device)
-# logger.info("ControlNet model loaded successfully.")
-
-# logger.info('Loading pipeline.')
-# pipe = FluxControlNetPipeline.from_pretrained(
-#     model_path, controlnet=controlnet, torch_dtype=torch.bfloat16
-# ).to(device)
-# logger.info("Pipeline loaded successfully.")
-
-# MAX_SEED = 1000000
-# MAX_PIXEL_BUDGET = 1024 * 1024
-
-# @spaces.GPU
-# def process_input(input_image, upscale_factor):
-#     w, h = input_image.size
-#     aspect_ratio = w / h
-#     was_resized = False
-
-#     # Resize if input size exceeds the maximum pixel budget
-#     if w * h * upscale_factor**2 > MAX_PIXEL_BUDGET:
-#         warnings.warn(f"Requested output image is too large. Resizing to fit within pixel budget.")
-#         input_image = input_image.resize(
-#             (
-#                 int(aspect_ratio * MAX_PIXEL_BUDGET**0.5 // upscale_factor),
-#                 int(MAX_PIXEL_BUDGET**0.5 // aspect_ratio // upscale_factor),
-#             )
-#         )
-#         was_resized = True
-
-#     # Adjust dimensions to be a multiple of 8
-#     w, h = input_image.size
-#     w = w - w % 8
-#     h = h - h % 8
-
-#     return input_image.resize((w, h)), was_resized
-
-
-# @spaces.GPU
-# def run_inference(input_image, upscale_factor, seed, num_inference_steps, controlnet_conditioning_scale):
-#     logger.info("Processing inference.")
-#     input_image, was_resized = process_input(input_image, upscale_factor)
-
-#     # Rescale image for ControlNet processing
-#     w, h = input_image.size
-#     control_image = input_image.resize((w * upscale_factor, h * upscale_factor))
-
-#     # Set the random generator for inference
-#     generator = torch.Generator().manual_seed(seed)
-
-#     # Perform inference using the pipeline
-#     logger.info("Running pipeline.")
-#     image = pipe(
-#         prompt="",
-#         control_image=control_image,
-#         controlnet_conditioning_scale=controlnet_conditioning_scale,
-#         num_inference_steps=num_inference_steps,
-#         guidance_scale=3.5,
-#         height=control_image.size[1],
-#         width=control_image.size[0],
-#         generator=generator,
-#     ).images[0]
-
-#     # Resize output image back to the original dimensions if needed
-#     if was_resized:
-#         original_size = (input_image.width * upscale_factor, input_image.height * upscale_factor)
-#         image = image.resize(original_size)
-
-#     return image
-
-# def run_gradio_app():
-#     with gr.Blocks() as app:
-#         gr.Markdown("## Image Upscaler using ControlNet")
-
-#         # Define the inputs and outputs
-#         input_image = gr.Image(type="pil", label="Input Image")
-#         upscale_factor = gr.Slider(minimum=1, maximum=8, step=1, label="Upscale Factor")
-#         seed = gr.Slider(minimum=0, maximum=100, step=1, label="Seed")
-#         num_inference_steps = gr.Slider(minimum=1, maximum=100, step=1, label="Inference Steps")
-#         controlnet_conditioning_scale = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, label="ControlNet Conditioning Scale")
-
-#         output_image = gr.Image(type="pil", label="Output Image")
-
-#         # Create a button to trigger the processing
-#         submit_button = gr.Button("Upscale Image")
-
-#         # Define the function to run when the button is clicked
-#         submit_button.click(run_inference,
-#                             inputs=[input_image, upscale_factor, seed, num_inference_steps, controlnet_conditioning_scale],
-#                             outputs=output_image)
-
-#     app.launch()
-
-# if __name__ == "__main__":
-#     run_gradio_app()
-
-
-
-
-# import logging
-# import random
-# import warnings
-# import gradio as gr
-# import os
-# import shutil,spaces
-# import subprocess
-# import torch
-# import numpy as np
-# from diffusers import FluxControlNetModel
-# from diffusers.pipelines import FluxControlNetPipeline
-# from PIL import Image
-# from huggingface_hub import snapshot_download, login
-# import io
-# import base64
-# from fastapi import FastAPI, File, UploadFile
-# from fastapi.responses import JSONResponse
-# from fastapi.middleware.cors import CORSMiddleware
-# from concurrent.futures import ThreadPoolExecutor
-
-# # Configure logging
-# logging.basicConfig(level=logging.INFO)
-# logger = logging.getLogger(__name__)
-
-# # FastAPI app for image processing
-# app = FastAPI()
-# app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"])
-
-# # ThreadPoolExecutor for managing image processing threads
-# executor = ThreadPoolExecutor()
-
-# # Determine the device (GPU or CPU)
-# if torch.cuda.is_available():
-#     device = "cuda"
-#     logger.info("CUDA is available. Using GPU.")
-# else:
-#     device = "cpu"
-#     logger.info("CUDA is not available. Using CPU.")
-
-# # Load model from Huggingface Hub
-# huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
-# if huggingface_token:
-#     login(token=huggingface_token)
-#     logger.info("Hugging Face token found and logged in.")
-# else:
-#     logger.warning("Hugging Face token not found in environment variables.")
-
-# # Download model using snapshot_download
-# model_path = snapshot_download(
-#     repo_id="black-forest-labs/FLUX.1-dev",
-#     repo_type="model",
-#     ignore_patterns=["*.md", "*..gitattributes"],
-#     local_dir="FLUX.1-dev",
-#     token=huggingface_token
-# )
-# logger.info("Model downloaded to: %s", model_path)
-
-# # Load pipeline
-# logger.info('Loading ControlNet model.')
-# controlnet = FluxControlNetModel.from_pretrained(
-#     "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
-# ).to(device)
-# logger.info("ControlNet model loaded successfully.")
-
-# logger.info('Loading pipeline.')
-# pipe = FluxControlNetPipeline.from_pretrained(
-#     model_path, controlnet=controlnet, torch_dtype=torch.bfloat16
-# ).to(device)
-# logger.info("Pipeline loaded successfully.")
-
-# MAX_SEED = 1000000
-# MAX_PIXEL_BUDGET = 1024 * 1024
-
-# @spaces.GPU
-# def process_input(input_image, upscale_factor):
-#     w, h = input_image.size
-#     aspect_ratio = w / h
-#     was_resized = False
-
-#     # Resize if input size exceeds the maximum pixel budget
-#     if w * h * upscale_factor**2 > MAX_PIXEL_BUDGET:
-#         warnings.warn(f"Requested output image is too large. Resizing to fit within pixel budget.")
-#         input_image = input_image.resize(
-#             (
-#                 int(aspect_ratio * MAX_PIXEL_BUDGET**0.5 // upscale_factor),
-#                 int(MAX_PIXEL_BUDGET**0.5 // aspect_ratio // upscale_factor),
-#             )
-#         )
-#         was_resized = True
-
-#     # Adjust dimensions to be a multiple of 8
-#     w, h = input_image.size
-#     w = w - w % 8
-#     h = h - h % 8
-
-#     return input_image.resize((w, h)), was_resized
-
-# @spaces.GPU
-# def run_inference(input_image, upscale_factor, seed, num_inference_steps, controlnet_conditioning_scale):
-#     logger.info("Processing inference.")
-#     input_image, was_resized = process_input(input_image, upscale_factor)
-
-#     # Rescale image for ControlNet processing
-#     w, h = input_image.size
-#     control_image = input_image.resize((w * upscale_factor, h * upscale_factor))
-
-#     # Set the random generator for inference
-#     generator = torch.Generator().manual_seed(seed)
-
-#     # Perform inference using the pipeline
-#     logger.info("Running pipeline.")
-#     image = pipe(
-#         prompt="",
-#         control_image=control_image,
-#         controlnet_conditioning_scale=controlnet_conditioning_scale,
-#         num_inference_steps=num_inference_steps,
-#         guidance_scale=3.5,
-#         height=control_image.size[1],
-#         width=control_image.size[0],
-#         generator=generator,
-#     ).images[0]
-
-#     # Resize output image back to the original dimensions if needed
-#     if was_resized:
-#         original_size = (input_image.width * upscale_factor, input_image.height * upscale_factor)
-#         image = image.resize(original_size)
-
-#     # Convert the output image to base64
-#     buffered = io.BytesIO()
-#     image.save(buffered, format="JPEG")
-#     image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
-
-#     return image_base64
-
-# @app.post("/infer")
-# async def infer(input_image: UploadFile = File(...),
-#                 upscale_factor: int = 4,
-#                 seed: int = 42,
-#                 num_inference_steps: int = 28,
-#                 controlnet_conditioning_scale: float = 0.6):
-#     logger.info("Received request for inference.")
-
-#     # Read the uploaded image
-#     contents = await input_image.read()
-#     image = Image.open(io.BytesIO(contents))
-
-#     # Run inference in a separate thread
-#     base64_image = await executor.submit(run_inference, image, upscale_factor, seed, num_inference_steps, controlnet_conditioning_scale)
-
-#     return JSONResponse(content={"base64_image": base64_image})
-
-# def run_gradio_app():
-#     with gr.Blocks() as app:
-#         gr.Markdown("## Image Upscaler using ControlNet")
-
-#         # Define the inputs and outputs
-#         input_image = gr.Image(type="pil", label="Input Image")
-#         upscale_factor = gr.Slider(minimum=1, maximum=8, step=1, label="Upscale Factor")
-#         seed = gr.Slider(minimum=0, maximum=100, step=1, label="Seed")
-#         num_inference_steps = gr.Slider(minimum=1, maximum=100, step=1, label="Inference Steps")
-#         controlnet_conditioning_scale = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, label="ControlNet Conditioning Scale")
-
-#         output_image = gr.Image(type="pil", label="Output Image")
-#         output_base64 = gr.Textbox(label="Base64 String", interactive=False)
-
-#         # Create a button to trigger the processing
-#         submit_button = gr.Button("Upscale Image")
-
-#         # Define the function to run when the button is clicked
-#         submit_button.click(run_inference,
-#                             inputs=[input_image, upscale_factor, seed, num_inference_steps, controlnet_conditioning_scale],
-#                             outputs=[output_image, output_base64])
-
-#     app.launch()
-
-# if __name__ == "__main__":
-#     run_gradio_app()
-
-
-
 import spaces
 import logging
 import random
@@ -792,6 +19,7 @@ from fastapi.middleware.cors import CORSMiddleware
 from concurrent.futures import ThreadPoolExecutor
 import uvicorn
 import asyncio
+import time  # Import time module for measuring execution time

 # Configure logging
 logging.basicConfig(level=logging.INFO)
@@ -915,6 +143,9 @@ async def infer(input_image: UploadFile = File(...),
                 controlnet_conditioning_scale: float = 0.6):
     logger.info("Received request for inference.")

+    # Start timing the entire inference process
+    start_time = time.time()
+
     # Read the uploaded image
     contents = await input_image.read()
     image = Image.open(io.BytesIO(contents))
@@ -925,13 +156,11 @@ async def infer(input_image: UploadFile = File(...),
     # Run inference in a separate thread
     base64_image = await loop.run_in_executor(executor, run_inference, image, upscale_factor, seed, num_inference_steps, controlnet_conditioning_scale)

-    return JSONResponse(content={"base64_image": base64_image})
+    # Calculate the time taken
+    time_taken = time.time() - start_time
+
+    return JSONResponse(content={"base64_image": base64_image, "time_taken": time_taken})

 if __name__ == "__main__":
     # Start FastAPI server
     uvicorn.run(app, host="0.0.0.0", port=7860)
-
-
-
-
-
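
Since the diff only shows the server side, here is a minimal client-side sketch of how the new "time_taken" field can be consumed. The host, port, and file names are assumptions (the port comes from the uvicorn.run(...) line above), and the requests library is not part of the Space itself:

import base64
import requests  # assumed to be installed separately; not used by app.py

# Hypothetical deployment URL; adjust host/port for your setup.
url = "http://localhost:7860/infer"

with open("input.jpg", "rb") as f:  # "input.jpg" is a placeholder file name
    resp = requests.post(
        url,
        files={"input_image": ("input.jpg", f, "image/jpeg")},
        params={"upscale_factor": 4, "seed": 42},  # scalar args are query params in FastAPI
        timeout=600,  # upscaling can take a while, especially on CPU
    )
resp.raise_for_status()
payload = resp.json()

# "time_taken" is the field this commit adds: server-side seconds as a float.
print(f"Server-side inference took {payload['time_taken']:.2f}s")

# Decode the upscaled image returned as base64 and write it to disk.
with open("upscaled.jpg", "wb") as f:
    f.write(base64.b64decode(payload["base64_image"]))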
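
Note that the timing wraps the run_in_executor call, so "time_taken" measures the full wall-clock wait of the request, including any queueing in the thread pool, not just the pipe(...) call itself. A self-contained sketch of that pattern, with illustrative names only:

import asyncio
import time
from concurrent.futures import ThreadPoolExecutor

executor = ThreadPoolExecutor()

def blocking_work(duration: float) -> str:
    # Stand-in for the blocking diffusers pipeline call.
    time.sleep(duration)
    return "done"

async def handler() -> str:
    start_time = time.time()
    loop = asyncio.get_running_loop()
    # The blocking call runs on a worker thread; the event loop stays free
    # to serve other requests while this coroutine awaits the result.
    result = await loop.run_in_executor(executor, blocking_work, 1.0)
    print(f"took {time.time() - start_time:.2f}s")  # ~1.00s
    return result

asyncio.run(handler())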