Spanicin committed on
Commit fcb0cff · verified · parent: da042e9

Update app.py

Files changed (1)
app.py +357 -173
app.py CHANGED
@@ -1,43 +1,296 @@
  import logging
  import random
  import warnings
  import os
- import gradio as gr
- import numpy as np
- import spaces
  import torch
  from diffusers import FluxControlNetModel
  from diffusers.pipelines import FluxControlNetPipeline
- from gradio_imageslider import ImageSlider
  from PIL import Image
  from huggingface_hub import snapshot_download

- css = """
- #col-container {
-     margin: 0 auto;
-     max-width: 512px;
- }
- """

  if torch.cuda.is_available():
-     power_device = "GPU"
      device = "cuda"
  else:
-     power_device = "CPU"
      device = "cpu"

-
- huggingface_token = os.getenv("HUGGINFACE_TOKEN")
-
  model_path = snapshot_download(
      repo_id="black-forest-labs/FLUX.1-dev",
      repo_type="model",
      ignore_patterns=["*.md", "*..gitattributes"],
      local_dir="FLUX.1-dev",
-     token=huggingface_token, # type a new token-id.
  )

-
  # Load pipeline
  controlnet = FluxControlNetModel.from_pretrained(
      "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
@@ -50,21 +303,14 @@ pipe.to(device)
  MAX_SEED = 1000000
  MAX_PIXEL_BUDGET = 1024 * 1024

-
- def process_input(input_image, upscale_factor, **kwargs):
      w, h = input_image.size
-     w_original, h_original = w, h
      aspect_ratio = w / h
-
      was_resized = False

      if w * h * upscale_factor**2 > MAX_PIXEL_BUDGET:
-         warnings.warn(
-             f"Requested output image is too large ({w * upscale_factor}x{h * upscale_factor}). Resizing to ({int(aspect_ratio * MAX_PIXEL_BUDGET ** 0.5 // upscale_factor), int(MAX_PIXEL_BUDGET ** 0.5 // aspect_ratio // upscale_factor)}) pixels."
-         )
-         gr.Info(
-             f"Requested output image is too large ({w * upscale_factor}x{h * upscale_factor}). Resizing input to ({int(aspect_ratio * MAX_PIXEL_BUDGET ** 0.5 // upscale_factor), int(MAX_PIXEL_BUDGET ** 0.5 // aspect_ratio // upscale_factor)}) pixels budget."
-         )
          input_image = input_image.resize(
              (
                  int(aspect_ratio * MAX_PIXEL_BUDGET**0.5 // upscale_factor),
@@ -73,38 +319,24 @@ def process_input(input_image, upscale_factor, **kwargs):
          )
          was_resized = True

-     # resize to multiple of 8
      w, h = input_image.size
      w = w - w % 8
      h = h - h % 8

-     return input_image.resize((w, h)), w_original, h_original, was_resized


- @spaces.GPU#(duration=42)
- def infer(
-     seed,
-     randomize_seed,
-     input_image,
-     num_inference_steps,
-     upscale_factor,
-     controlnet_conditioning_scale,
-     progress=gr.Progress(track_tqdm=True),
- ):
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-     true_input_image = input_image
-     input_image, w_original, h_original, was_resized = process_input(
-         input_image, upscale_factor
-     )
-
-     # rescale with upscale factor
      w, h = input_image.size
      control_image = input_image.resize((w * upscale_factor, h * upscale_factor))

      generator = torch.Generator().manual_seed(seed)

-     gr.Info("Upscaling image...")
      image = pipe(
          prompt="",
          control_image=control_image,
@@ -116,131 +348,83 @@ def infer(
          generator=generator,
      ).images[0]

      if was_resized:
-         gr.Info(
-             f"Resizing output image to targeted {w_original * upscale_factor}x{h_original * upscale_factor} size."
-         )

-     # resize to target desired size
-     image = image.resize((w_original * upscale_factor, h_original * upscale_factor))
-     image.save("output.jpg")
-     # convert to numpy
-     return [true_input_image, image, seed]
-
-
- with gr.Blocks(css=css) as demo:
-     # with gr.Column(elem_id="col-container"):
-     gr.Markdown(
-         f"""
-     # Flux.1-dev Upscaler ControlNet
-     This is an interactive demo of [Flux.1-dev Upscaler ControlNet](https://huggingface.co/jasperai/Flux.1-dev-Controlnet-Upscaler) taking as input a low resolution image to generate a high resolution image.
-     Currently running on {power_device}.
-
-     *Note*: Even though the model can handle higher resolution images, due to GPU memory constraints, this demo was limited to a generated output not exceeding a pixel budget of 1024x1024. If the requested size exceeds that limit, the input will be first resized keeping the aspect ratio such that the output of the controlNet model does not exceed the allocated pixel budget. The output is then resized to the targeted shape using a simple resizing. This may explain some artifacts for high resolution input. To adress this, run the demo locally or consider implementing a tiling strategy. Happy upscaling! 🚀
-     """
-     )
-
-     with gr.Row():
-         run_button = gr.Button(value="Run")
-
-     with gr.Row():
-         with gr.Column(scale=4):
-             input_im = gr.Image(label="Input Image", type="pil")
-         with gr.Column(scale=1):
-             num_inference_steps = gr.Slider(
-                 label="Number of Inference Steps",
-                 minimum=8,
-                 maximum=50,
-                 step=1,
-                 value=28,
-             )
-             upscale_factor = gr.Slider(
-                 label="Upscale Factor",
-                 minimum=1,
-                 maximum=4,
-                 step=1,
-                 value=4,
-             )
-             controlnet_conditioning_scale = gr.Slider(
-                 label="Controlnet Conditioning Scale",
-                 minimum=0.1,
-                 maximum=1.5,
-                 step=0.1,
-                 value=0.6,
-             )
-             seed = gr.Slider(
-                 label="Seed",
-                 minimum=0,
-                 maximum=MAX_SEED,
-                 step=1,
-                 value=42,
-             )
-
-             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-     with gr.Row():
-         result = ImageSlider(label="Input / Output", type="pil", interactive=True)
-
-     examples = gr.Examples(
-         examples=[
-             # [42, False, "examples/image_1.jpg", 28, 4, 0.6],
-             [42, False, "examples/image_2.jpg", 28, 4, 0.6],
-             # [42, False, "examples/image_3.jpg", 28, 4, 0.6],
-             [42, False, "examples/image_4.jpg", 28, 4, 0.6],
-             # [42, False, "examples/image_5.jpg", 28, 4, 0.6],
-             # [42, False, "examples/image_6.jpg", 28, 4, 0.6],
-         ],
-         inputs=[
-             seed,
-             randomize_seed,
-             input_im,
-             num_inference_steps,
-             upscale_factor,
-             controlnet_conditioning_scale,
-         ],
-         fn=infer,
-         outputs=result,
-         cache_examples="lazy",
-     )
-
-     # examples = gr.Examples(
-     #     examples=[
-     #         #[42, False, "examples/image_1.jpg", 28, 4, 0.6],
-     #         [42, False, "examples/image_2.jpg", 28, 4, 0.6],
-     #         #[42, False, "examples/image_3.jpg", 28, 4, 0.6],
-     #         #[42, False, "examples/image_4.jpg", 28, 4, 0.6],
-     #         [42, False, "examples/image_5.jpg", 28, 4, 0.6],
-     #         [42, False, "examples/image_6.jpg", 28, 4, 0.6],
-     #         [42, False, "examples/image_7.jpg", 28, 4, 0.6],
-     #     ],
-     #     inputs=[
-     #         seed,
-     #         randomize_seed,
-     #         input_im,
-     #         num_inference_steps,
-     #         upscale_factor,
-     #         controlnet_conditioning_scale,
-     #     ],
-     # )
-
-     gr.Markdown("**Disclaimer:**")
-     gr.Markdown(
-         "This demo is only for research purpose. Jasper cannot be held responsible for the generation of NSFW (Not Safe For Work) content through the use of this demo. Users are solely responsible for any content they create, and it is their obligation to ensure that it adheres to appropriate and ethical standards. Jasper provides the tools, but the responsibility for their use lies with the individual user."
-     )
-     gr.on(
-         [run_button.click],
-         fn=infer,
-         inputs=[
-             seed,
-             randomize_seed,
-             input_im,
-             num_inference_steps,
-             upscale_factor,
-             controlnet_conditioning_scale,
-         ],
-         outputs=result,
-         show_api=False,
-         # show_progress="minimal",
-     )
-
- demo.queue().launch(share=False, show_api=False)

+ # import logging
+ # import random
+ # import warnings
+ # import os
+ # import gradio as gr
+ # import numpy as np
+ # import spaces
+ # import torch
+ # from diffusers import FluxControlNetModel
+ # from diffusers.pipelines import FluxControlNetPipeline
+ # from gradio_imageslider import ImageSlider
+ # from PIL import Image
+ # from huggingface_hub import snapshot_download
+
+ # css = """
+ # #col-container {
+ #     margin: 0 auto;
+ #     max-width: 512px;
+ # }
+ # """
+
+ # if torch.cuda.is_available():
+ #     power_device = "GPU"
+ #     device = "cuda"
+ # else:
+ #     power_device = "CPU"
+ #     device = "cpu"
+
+
+ # huggingface_token = os.getenv("HUGGINFACE_TOKEN")
+
+ # model_path = snapshot_download(
+ #     repo_id="black-forest-labs/FLUX.1-dev",
+ #     repo_type="model",
+ #     ignore_patterns=["*.md", "*..gitattributes"],
+ #     local_dir="FLUX.1-dev",
+ #     token=huggingface_token, # type a new token-id.
+ # )
+
+
+ # # Load pipeline
+ # controlnet = FluxControlNetModel.from_pretrained(
+ #     "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
+ # ).to(device)
+ # pipe = FluxControlNetPipeline.from_pretrained(
+ #     model_path, controlnet=controlnet, torch_dtype=torch.bfloat16
+ # )
+ # pipe.to(device)
+
+ # MAX_SEED = 1000000
+ # MAX_PIXEL_BUDGET = 1024 * 1024
+
+
+ # def process_input(input_image, upscale_factor, **kwargs):
+ #     w, h = input_image.size
+ #     w_original, h_original = w, h
+ #     aspect_ratio = w / h
+
+ #     was_resized = False
+
+ #     if w * h * upscale_factor**2 > MAX_PIXEL_BUDGET:
+ #         warnings.warn(
+ #             f"Requested output image is too large ({w * upscale_factor}x{h * upscale_factor}). Resizing to ({int(aspect_ratio * MAX_PIXEL_BUDGET ** 0.5 // upscale_factor), int(MAX_PIXEL_BUDGET ** 0.5 // aspect_ratio // upscale_factor)}) pixels."
+ #         )
+ #         gr.Info(
+ #             f"Requested output image is too large ({w * upscale_factor}x{h * upscale_factor}). Resizing input to ({int(aspect_ratio * MAX_PIXEL_BUDGET ** 0.5 // upscale_factor), int(MAX_PIXEL_BUDGET ** 0.5 // aspect_ratio // upscale_factor)}) pixels budget."
+ #         )
+ #         input_image = input_image.resize(
+ #             (
+ #                 int(aspect_ratio * MAX_PIXEL_BUDGET**0.5 // upscale_factor),
+ #                 int(MAX_PIXEL_BUDGET**0.5 // aspect_ratio // upscale_factor),
+ #             )
+ #         )
+ #         was_resized = True
+
+ #     # resize to multiple of 8
+ #     w, h = input_image.size
+ #     w = w - w % 8
+ #     h = h - h % 8
+
+ #     return input_image.resize((w, h)), w_original, h_original, was_resized
+
+
+ # @spaces.GPU#(duration=42)
+ # def infer(
+ #     seed,
+ #     randomize_seed,
+ #     input_image,
+ #     num_inference_steps,
+ #     upscale_factor,
+ #     controlnet_conditioning_scale,
+ #     progress=gr.Progress(track_tqdm=True),
+ # ):
+ #     if randomize_seed:
+ #         seed = random.randint(0, MAX_SEED)
+ #     true_input_image = input_image
+ #     input_image, w_original, h_original, was_resized = process_input(
+ #         input_image, upscale_factor
+ #     )
+
+ #     # rescale with upscale factor
+ #     w, h = input_image.size
+ #     control_image = input_image.resize((w * upscale_factor, h * upscale_factor))
+
+ #     generator = torch.Generator().manual_seed(seed)
+
+ #     gr.Info("Upscaling image...")
+ #     image = pipe(
+ #         prompt="",
+ #         control_image=control_image,
+ #         controlnet_conditioning_scale=controlnet_conditioning_scale,
+ #         num_inference_steps=num_inference_steps,
+ #         guidance_scale=3.5,
+ #         height=control_image.size[1],
+ #         width=control_image.size[0],
+ #         generator=generator,
+ #     ).images[0]
+
+ #     if was_resized:
+ #         gr.Info(
+ #             f"Resizing output image to targeted {w_original * upscale_factor}x{h_original * upscale_factor} size."
+ #         )
+
+ #     # resize to target desired size
+ #     image = image.resize((w_original * upscale_factor, h_original * upscale_factor))
+ #     image.save("output.jpg")
+ #     # convert to numpy
+ #     return [true_input_image, image, seed]
+
+
+ # with gr.Blocks(css=css) as demo:
+ #     # with gr.Column(elem_id="col-container"):
+ #     gr.Markdown(
+ #         f"""
+ #     # ⚡ Flux.1-dev Upscaler ControlNet ⚡
+ #     This is an interactive demo of [Flux.1-dev Upscaler ControlNet](https://huggingface.co/jasperai/Flux.1-dev-Controlnet-Upscaler) taking as input a low resolution image to generate a high resolution image.
+ #     Currently running on {power_device}.
+
+ #     *Note*: Even though the model can handle higher resolution images, due to GPU memory constraints, this demo was limited to a generated output not exceeding a pixel budget of 1024x1024. If the requested size exceeds that limit, the input will be first resized keeping the aspect ratio such that the output of the controlNet model does not exceed the allocated pixel budget. The output is then resized to the targeted shape using a simple resizing. This may explain some artifacts for high resolution input. To adress this, run the demo locally or consider implementing a tiling strategy. Happy upscaling! 🚀
+ #     """
+ #     )
+
+ #     with gr.Row():
+ #         run_button = gr.Button(value="Run")
+
+ #     with gr.Row():
+ #         with gr.Column(scale=4):
+ #             input_im = gr.Image(label="Input Image", type="pil")
+ #         with gr.Column(scale=1):
+ #             num_inference_steps = gr.Slider(
+ #                 label="Number of Inference Steps",
+ #                 minimum=8,
+ #                 maximum=50,
+ #                 step=1,
+ #                 value=28,
+ #             )
+ #             upscale_factor = gr.Slider(
+ #                 label="Upscale Factor",
+ #                 minimum=1,
+ #                 maximum=4,
+ #                 step=1,
+ #                 value=4,
+ #             )
+ #             controlnet_conditioning_scale = gr.Slider(
+ #                 label="Controlnet Conditioning Scale",
+ #                 minimum=0.1,
+ #                 maximum=1.5,
+ #                 step=0.1,
+ #                 value=0.6,
+ #             )
+ #             seed = gr.Slider(
+ #                 label="Seed",
+ #                 minimum=0,
+ #                 maximum=MAX_SEED,
+ #                 step=1,
+ #                 value=42,
+ #             )
+
+ #             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+ #     with gr.Row():
+ #         result = ImageSlider(label="Input / Output", type="pil", interactive=True)
+
+ #     examples = gr.Examples(
+ #         examples=[
+ #             # [42, False, "examples/image_1.jpg", 28, 4, 0.6],
+ #             [42, False, "examples/image_2.jpg", 28, 4, 0.6],
+ #             # [42, False, "examples/image_3.jpg", 28, 4, 0.6],
+ #             [42, False, "examples/image_4.jpg", 28, 4, 0.6],
+ #             # [42, False, "examples/image_5.jpg", 28, 4, 0.6],
+ #             # [42, False, "examples/image_6.jpg", 28, 4, 0.6],
+ #         ],
+ #         inputs=[
+ #             seed,
+ #             randomize_seed,
+ #             input_im,
+ #             num_inference_steps,
+ #             upscale_factor,
+ #             controlnet_conditioning_scale,
+ #         ],
+ #         fn=infer,
+ #         outputs=result,
+ #         cache_examples="lazy",
+ #     )
+
+ #     # examples = gr.Examples(
+ #     #     examples=[
+ #     #         #[42, False, "examples/image_1.jpg", 28, 4, 0.6],
+ #     #         [42, False, "examples/image_2.jpg", 28, 4, 0.6],
+ #     #         #[42, False, "examples/image_3.jpg", 28, 4, 0.6],
+ #     #         #[42, False, "examples/image_4.jpg", 28, 4, 0.6],
+ #     #         [42, False, "examples/image_5.jpg", 28, 4, 0.6],
+ #     #         [42, False, "examples/image_6.jpg", 28, 4, 0.6],
+ #     #         [42, False, "examples/image_7.jpg", 28, 4, 0.6],
+ #     #     ],
+ #     #     inputs=[
+ #     #         seed,
+ #     #         randomize_seed,
+ #     #         input_im,
+ #     #         num_inference_steps,
+ #     #         upscale_factor,
+ #     #         controlnet_conditioning_scale,
+ #     #     ],
+ #     # )
+
+ #     gr.Markdown("**Disclaimer:**")
+ #     gr.Markdown(
+ #         "This demo is only for research purpose. Jasper cannot be held responsible for the generation of NSFW (Not Safe For Work) content through the use of this demo. Users are solely responsible for any content they create, and it is their obligation to ensure that it adheres to appropriate and ethical standards. Jasper provides the tools, but the responsibility for their use lies with the individual user."
+ #     )
+ #     gr.on(
+ #         [run_button.click],
+ #         fn=infer,
+ #         inputs=[
+ #             seed,
+ #             randomize_seed,
+ #             input_im,
+ #             num_inference_steps,
+ #             upscale_factor,
+ #             controlnet_conditioning_scale,
+ #         ],
+ #         outputs=result,
+ #         show_api=False,
+ #         # show_progress="minimal",
+ #     )
+
+ # demo.queue().launch(share=False, show_api=False)
+
+
+
+
+
+
  import logging
  import random
  import warnings
  import os

  import torch
+ import numpy as np
  from diffusers import FluxControlNetModel
  from diffusers.pipelines import FluxControlNetPipeline
  from PIL import Image
  from huggingface_hub import snapshot_download
+ import io
+ import base64
+ from flask import Flask, request, jsonify
+ from concurrent.futures import ThreadPoolExecutor
+ from flask_cors import CORS
+
+ app = Flask(__name__)
+ CORS(app)

+ # Add config to store base64 images
+ app.config['image_outputs'] = {}

+ # ThreadPoolExecutor for managing image processing threads
+ executor = ThreadPoolExecutor()
+
+ # Determine the device (GPU or CPU)
  if torch.cuda.is_available():
      device = "cuda"
  else:
      device = "cpu"

+ # Load model from Huggingface Hub
+ huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
  model_path = snapshot_download(
      repo_id="black-forest-labs/FLUX.1-dev",
      repo_type="model",
      ignore_patterns=["*.md", "*..gitattributes"],
      local_dir="FLUX.1-dev",
+     token=huggingface_token,
  )

  # Load pipeline
  controlnet = FluxControlNetModel.from_pretrained(
      "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16

  MAX_SEED = 1000000
  MAX_PIXEL_BUDGET = 1024 * 1024

+ def process_input(input_image, upscale_factor):
      w, h = input_image.size
      aspect_ratio = w / h
      was_resized = False

+     # Resize if input size exceeds the maximum pixel budget
      if w * h * upscale_factor**2 > MAX_PIXEL_BUDGET:
+         warnings.warn(f"Requested output image is too large. Resizing to fit within pixel budget.")
          input_image = input_image.resize(
              (
                  int(aspect_ratio * MAX_PIXEL_BUDGET**0.5 // upscale_factor),

          )
          was_resized = True

+     # Adjust dimensions to be a multiple of 8
      w, h = input_image.size
      w = w - w % 8
      h = h - h % 8

+     return input_image.resize((w, h)), was_resized

+ def run_inference(process_id, input_image, upscale_factor, seed, num_inference_steps, controlnet_conditioning_scale):
+     input_image, was_resized = process_input(input_image, upscale_factor)

+     # Rescale image for ControlNet processing
      w, h = input_image.size
      control_image = input_image.resize((w * upscale_factor, h * upscale_factor))

+     # Set the random generator for inference
      generator = torch.Generator().manual_seed(seed)

+     # Perform inference using the pipeline
      image = pipe(
          prompt="",
          control_image=control_image,

          generator=generator,
      ).images[0]

+     # Resize output image back to the original dimensions if needed
      if was_resized:
+         original_size = (input_image.width * upscale_factor, input_image.height * upscale_factor)
+         image = image.resize(original_size)
+
+     # Convert the output image to base64
+     buffered = io.BytesIO()
+     image.save(buffered, format="JPEG")
+     image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
+
+     # Store the result in the shared dictionary
+     app.config['image_outputs'][process_id] = image_base64
+
+ @app.route('/infer', methods=['POST'])
+ def infer():
+     data = request.json
+     seed = data.get("seed", 42)
+     randomize_seed = data.get("randomize_seed", True)
+     num_inference_steps = data.get("num_inference_steps", 28)
+     upscale_factor = data.get("upscale_factor", 4)
+     controlnet_conditioning_scale = data.get("controlnet_conditioning_scale", 0.6)
+
+     # Randomize seed if specified
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)

+     # Load and process the input image
+     input_image_data = base64.b64decode(data['input_image'])
+     input_image = Image.open(io.BytesIO(input_image_data))
+
+     # Create a unique process ID for this request
+     process_id = str(random.randint(1000, 9999))
+
+     # Set the status to 'in_progress'
+     app.config['image_outputs'][process_id] = None
+
+     # Run the inference in a separate thread
+     executor.submit(run_inference, process_id, input_image, upscale_factor, seed, num_inference_steps, controlnet_conditioning_scale)
+
+     # Return the process ID
+     return jsonify({
+         "process_id": process_id,
+         "message": "Processing started"
+     })
+
+ # Modify status endpoint to receive process_id in request body
+ @app.route('/status', methods=['POST'])
+ def status():
+     data = request.json
+     process_id = data.get('process_id')
+
+     # Check if process_id was provided
+     if not process_id:
+         return jsonify({
+             "status": "error",
+             "message": "Process ID is required"
+         }), 400
+
+     # Check if the process_id exists in the dictionary
+     if process_id not in app.config['image_outputs']:
+         return jsonify({
+             "status": "error",
+             "message": "Invalid process ID"
+         }), 404
+
+     # Check the status of the image processing
+     image_base64 = app.config['image_outputs'][process_id]
+     if image_base64 is None:
+         return jsonify({
+             "status": "in_progress"
+         })
+     else:
+         return jsonify({
+             "status": "completed",
+             "output_image": image_base64
+         })
+
+ if __name__ == '__main__':
+     app.run(debug=True)
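
For reference, here is a minimal client sketch for the HTTP API added in this commit. It is an illustrative example, not part of the committed file: it assumes the server is reachable at http://localhost:5000 (Flask's default bind for app.run), that the third-party requests package is installed, and that a local file named input.jpg exists (a hypothetical path). The sketch submits a base64-encoded image to /infer, then polls /status with the returned process_id until the upscaled image is ready.

import base64
import time

import requests  # assumed installed; any HTTP client would work

BASE_URL = "http://localhost:5000"  # assumption: Flask default host/port

# The /infer endpoint expects the input image as a base64 string.
with open("input.jpg", "rb") as f:  # hypothetical input file
    input_image_b64 = base64.b64encode(f.read()).decode("utf-8")

# Start processing; field names and values mirror the endpoint's defaults.
resp = requests.post(f"{BASE_URL}/infer", json={
    "input_image": input_image_b64,
    "seed": 42,
    "randomize_seed": False,
    "num_inference_steps": 28,
    "upscale_factor": 4,
    "controlnet_conditioning_scale": 0.6,
})
process_id = resp.json()["process_id"]

# Poll /status until the background worker has stored the result.
while True:
    status = requests.post(f"{BASE_URL}/status", json={"process_id": process_id}).json()
    if status["status"] == "error":
        raise RuntimeError(status["message"])
    if status["status"] == "completed":
        with open("output.jpg", "wb") as f:
            f.write(base64.b64decode(status["output_image"]))
        break
    time.sleep(2)  # the diffusion pipeline can take a while per image

The fire-and-poll design is why two calls are needed: /infer returns a process_id immediately while a ThreadPoolExecutor worker runs the pipeline, and results are held only in the in-memory app.config['image_outputs'] dict, so they do not survive a server restart.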