Spanicin committed
Commit 68c48e6 · verified · 1 Parent(s): abb89b9

Update app.py

Files changed (1)
  1. app.py +446 -209
app.py CHANGED
@@ -1,70 +1,348 @@
  # import logging
  # import random
  # import warnings
- # import os
- # import gradio as gr
- # import numpy as np
- # import spaces
  # import torch
  # from diffusers import FluxControlNetModel
  # from diffusers.pipelines import FluxControlNetPipeline
- # from gradio_imageslider import ImageSlider
  # from PIL import Image
- # from huggingface_hub import snapshot_download
-
- # css = """
- # #col-container {
- #     margin: 0 auto;
- #     max-width: 512px;
- # }
- # """
-
  # if torch.cuda.is_available():
- #     power_device = "GPU"
  #     device = "cuda"
  # else:
- #     power_device = "CPU"
  #     device = "cpu"

-
- # huggingface_token = os.getenv("HUGGINFACE_TOKEN")
-
- # model_path = snapshot_download(
  #     repo_id="black-forest-labs/FLUX.1-dev",
  #     repo_type="model",
  #     ignore_patterns=["*.md", "*..gitattributes"],
  #     local_dir="FLUX.1-dev",
- #     token=huggingface_token, # type a new token-id.
- # )
-

  # # Load pipeline
- # controlnet = FluxControlNetModel.from_pretrained(
  #     "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
  # ).to(device)
- # pipe = FluxControlNetPipeline.from_pretrained(
  #     model_path, controlnet=controlnet, torch_dtype=torch.bfloat16
- # )
- # pipe.to(device)

  # MAX_SEED = 1000000
  # MAX_PIXEL_BUDGET = 1024 * 1024

-
- # def process_input(input_image, upscale_factor, **kwargs):
  #     w, h = input_image.size
- #     w_original, h_original = w, h
  #     aspect_ratio = w / h
-
  #     was_resized = False

  #     if w * h * upscale_factor**2 > MAX_PIXEL_BUDGET:
- #         warnings.warn(
- #             f"Requested output image is too large ({w * upscale_factor}x{h * upscale_factor}). Resizing to ({int(aspect_ratio * MAX_PIXEL_BUDGET ** 0.5 // upscale_factor), int(MAX_PIXEL_BUDGET ** 0.5 // aspect_ratio // upscale_factor)}) pixels."
- #         )
- #         gr.Info(
- #             f"Requested output image is too large ({w * upscale_factor}x{h * upscale_factor}). Resizing input to ({int(aspect_ratio * MAX_PIXEL_BUDGET ** 0.5 // upscale_factor), int(MAX_PIXEL_BUDGET ** 0.5 // aspect_ratio // upscale_factor)}) pixels budget."
- #         )
  #         input_image = input_image.resize(
  #             (
  #                 int(aspect_ratio * MAX_PIXEL_BUDGET**0.5 // upscale_factor),
@@ -73,38 +351,24 @@
  #             )
  #         was_resized = True

- #     # resize to multiple of 8
  #     w, h = input_image.size
  #     w = w - w % 8
  #     h = h - h % 8

- #     return input_image.resize((w, h)), w_original, h_original, was_resized


- # @spaces.GPU#(duration=42)
- # def infer(
- #     seed,
- #     randomize_seed,
- #     input_image,
- #     num_inference_steps,
- #     upscale_factor,
- #     controlnet_conditioning_scale,
- #     progress=gr.Progress(track_tqdm=True),
- # ):
- #     if randomize_seed:
- #         seed = random.randint(0, MAX_SEED)
- #     true_input_image = input_image
- #     input_image, w_original, h_original, was_resized = process_input(
- #         input_image, upscale_factor
- #     )
-
- #     # rescale with upscale factor
  #     w, h = input_image.size
  #     control_image = input_image.resize((w * upscale_factor, h * upscale_factor))

  #     generator = torch.Generator().manual_seed(seed)

- #     gr.Info("Upscaling image...")
  #     image = pipe(
  #         prompt="",
  #         control_image=control_image,
@@ -116,135 +380,85 @@
  #         generator=generator,
  #     ).images[0]

  #     if was_resized:
- #         gr.Info(
- #             f"Resizing output image to targeted {w_original * upscale_factor}x{h_original * upscale_factor} size."
- #         )

- #     # resize to target desired size
- #     image = image.resize((w_original * upscale_factor, h_original * upscale_factor))
- #     image.save("output.jpg")
- #     # convert to numpy
- #     return [true_input_image, image, seed]
-
-
- # with gr.Blocks(css=css) as demo:
- #     # with gr.Column(elem_id="col-container"):
- #     gr.Markdown(
- #         f"""
- #     # ⚡ Flux.1-dev Upscaler ControlNet ⚡
- #     This is an interactive demo of [Flux.1-dev Upscaler ControlNet](https://huggingface.co/jasperai/Flux.1-dev-Controlnet-Upscaler) taking as input a low resolution image to generate a high resolution image.
- #     Currently running on {power_device}.
-
- #     *Note*: Even though the model can handle higher resolution images, due to GPU memory constraints, this demo was limited to a generated output not exceeding a pixel budget of 1024x1024. If the requested size exceeds that limit, the input will be first resized keeping the aspect ratio such that the output of the controlNet model does not exceed the allocated pixel budget. The output is then resized to the targeted shape using a simple resizing. This may explain some artifacts for high resolution input. To adress this, run the demo locally or consider implementing a tiling strategy. Happy upscaling! 🚀
- #     """
- #     )
-
- #     with gr.Row():
- #         run_button = gr.Button(value="Run")
-
- #     with gr.Row():
- #         with gr.Column(scale=4):
- #             input_im = gr.Image(label="Input Image", type="pil")
- #         with gr.Column(scale=1):
- #             num_inference_steps = gr.Slider(
- #                 label="Number of Inference Steps",
- #                 minimum=8,
- #                 maximum=50,
- #                 step=1,
- #                 value=28,
- #             )
- #             upscale_factor = gr.Slider(
- #                 label="Upscale Factor",
- #                 minimum=1,
- #                 maximum=4,
- #                 step=1,
- #                 value=4,
- #             )
- #             controlnet_conditioning_scale = gr.Slider(
- #                 label="Controlnet Conditioning Scale",
- #                 minimum=0.1,
- #                 maximum=1.5,
- #                 step=0.1,
- #                 value=0.6,
- #             )
- #             seed = gr.Slider(
- #                 label="Seed",
- #                 minimum=0,
- #                 maximum=MAX_SEED,
- #                 step=1,
- #                 value=42,
- #             )

- #             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
- #     with gr.Row():
- #         result = ImageSlider(label="Input / Output", type="pil", interactive=True)
-
- #     examples = gr.Examples(
- #         examples=[
- #             # [42, False, "examples/image_1.jpg", 28, 4, 0.6],
- #             [42, False, "examples/image_2.jpg", 28, 4, 0.6],
- #             # [42, False, "examples/image_3.jpg", 28, 4, 0.6],
- #             [42, False, "examples/image_4.jpg", 28, 4, 0.6],
- #             # [42, False, "examples/image_5.jpg", 28, 4, 0.6],
- #             # [42, False, "examples/image_6.jpg", 28, 4, 0.6],
- #         ],
- #         inputs=[
- #             seed,
- #             randomize_seed,
- #             input_im,
- #             num_inference_steps,
- #             upscale_factor,
- #             controlnet_conditioning_scale,
- #         ],
- #         fn=infer,
- #         outputs=result,
- #         cache_examples="lazy",
- #     )
-
- #     # examples = gr.Examples(
- #     #     examples=[
- #     #         #[42, False, "examples/image_1.jpg", 28, 4, 0.6],
- #     #         [42, False, "examples/image_2.jpg", 28, 4, 0.6],
- #     #         #[42, False, "examples/image_3.jpg", 28, 4, 0.6],
- #     #         #[42, False, "examples/image_4.jpg", 28, 4, 0.6],
- #     #         [42, False, "examples/image_5.jpg", 28, 4, 0.6],
- #     #         [42, False, "examples/image_6.jpg", 28, 4, 0.6],
- #     #         [42, False, "examples/image_7.jpg", 28, 4, 0.6],
- #     #     ],
- #     #     inputs=[
- #     #         seed,
- #     #         randomize_seed,
- #     #         input_im,
- #     #         num_inference_steps,
- #     #         upscale_factor,
- #     #         controlnet_conditioning_scale,
- #     #     ],
- #     # )
-
- #     gr.Markdown("**Disclaimer:**")
- #     gr.Markdown(
- #         "This demo is only for research purpose. Jasper cannot be held responsible for the generation of NSFW (Not Safe For Work) content through the use of this demo. Users are solely responsible for any content they create, and it is their obligation to ensure that it adheres to appropriate and ethical standards. Jasper provides the tools, but the responsibility for their use lies with the individual user."
- #     )
- #     gr.on(
- #         [run_button.click],
- #         fn=infer,
- #         inputs=[
- #             seed,
- #             randomize_seed,
- #             input_im,
- #             num_inference_steps,
- #             upscale_factor,
- #             controlnet_conditioning_scale,
- #         ],
- #         outputs=result,
- #         show_api=False,
- #         # show_progress="minimal",
- #     )
-
- # demo.queue().launch(share=False, show_api=False)


@@ -253,13 +467,15 @@
  import logging
  import random
  import warnings
- import os,shutil,subprocess
  import torch
  import numpy as np
  from diffusers import FluxControlNetModel
  from diffusers.pipelines import FluxControlNetPipeline
  from PIL import Image
- from huggingface_hub import snapshot_download,login
  import io
  import base64
  from flask import Flask, request, jsonify
@@ -267,22 +483,26 @@ from concurrent.futures import ThreadPoolExecutor
  from flask_cors import CORS
  from tqdm import tqdm

  app = Flask(__name__)
  CORS(app)

  # Function to check disk usage
  def check_disk_space():
      result = subprocess.run(['df', '-h'], capture_output=True, text=True)
-     print(result.stdout)

  # Function to clear Hugging Face cache
  def clear_huggingface_cache():
      cache_dir = os.path.expanduser('~/.cache/huggingface')
      if os.path.exists(cache_dir):
          shutil.rmtree(cache_dir)  # Removes the entire cache directory
-         print(f"Cleared Hugging Face cache at: {cache_dir}")
      else:
-         print("No Hugging Face cache found.")

  # Check disk space
  check_disk_space()
@@ -299,38 +519,45 @@ executor = ThreadPoolExecutor()
  # Determine the device (GPU or CPU)
  if torch.cuda.is_available():
      device = "cuda"
  else:
      device = "cpu"

  # Load model from Huggingface Hub
  huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
  if huggingface_token:
      login(token=huggingface_token)
  else:
-     print("Hugging Face token not found in environment variables.")
- print(huggingface_token)
  with tqdm(total=100, desc="Downloading model", bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}") as pbar:
-     model_path = snapshot_download(
-         repo_id="black-forest-labs/FLUX.1-dev",
-         repo_type="model",
-         ignore_patterns=["*.md", "*..gitattributes"],
-         local_dir="FLUX.1-dev",
-         token=huggingface_token)

  # # Load pipeline
- print('controlnet enters')
- with tqdm(total=100, desc="Downloading model", bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}") as pbar:
-     controlnet = FluxControlNetModel.from_pretrained(
-         "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
-     ).to(device)
- print('controlnet exits')
- print('pipe enters')
- with tqdm(total=100, desc="Downloading model", bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}") as pbar:
-     pipe = FluxControlNetPipeline.from_pretrained(
-         model_path, controlnet=controlnet, torch_dtype=torch.bfloat16
-     ).to(device)
-     # pipe.to(device)
- print('pipe exits')

  MAX_SEED = 1000000
  MAX_PIXEL_BUDGET = 1024 * 1024
@@ -359,6 +586,7 @@ def process_input(input_image, upscale_factor):
      return input_image.resize((w, h)), was_resized

  def run_inference(process_id, input_image, upscale_factor, seed, num_inference_steps, controlnet_conditioning_scale):
      input_image, was_resized = process_input(input_image, upscale_factor)

      # Rescale image for ControlNet processing
@@ -369,6 +597,7 @@ def run_inference(process_id, input_image, upscale_factor, seed, num_inference_s
      generator = torch.Generator().manual_seed(seed)

      # Perform inference using the pipeline
      image = pipe(
          prompt="",
          control_image=control_image,
@@ -392,6 +621,7 @@ def run_inference(process_id, input_image, upscale_factor, seed, num_inference_s

      # Store the result in the shared dictionary
      app.config['image_outputs'][process_id] = image_base64

  @app.route('/infer', methods=['POST'])
  def infer():
@@ -405,6 +635,7 @@ def infer():
      # Randomize seed if specified
      if randomize_seed:
          seed = random.randint(0, MAX_SEED)

      # Load and process the input image
      input_image_data = base64.b64decode(data['input_image'])
@@ -412,6 +643,7 @@ def infer():

      # Create a unique process ID for this request
      process_id = str(random.randint(1000, 9999))

      # Set the status to 'in_progress'
      app.config['image_outputs'][process_id] = None
@@ -433,6 +665,7 @@ def status():

      # Check if process_id was provided
      if not process_id:
          return jsonify({
              "status": "error",
              "message": "Process ID is required"
@@ -440,6 +673,7 @@ def status():

      # Check if the process_id exists in the dictionary
      if process_id not in app.config['image_outputs']:
          return jsonify({
              "status": "error",
              "message": "Invalid process ID"
@@ -448,10 +682,12 @@ def status():
      # Check the status of the image processing
      image_base64 = app.config['image_outputs'][process_id]
      if image_base64 is None:
          return jsonify({
              "status": "in_progress"
          })
      else:
          return jsonify({
              "status": "completed",
              "output_image": image_base64
@@ -460,3 +696,4 @@ def status():
  if __name__ == '__main__':
      app.run(debug=True)

+ # # import logging
+ # # import random
+ # # import warnings
+ # # import os
+ # # import gradio as gr
+ # # import numpy as np
+ # # import spaces
+ # # import torch
+ # # from diffusers import FluxControlNetModel
+ # # from diffusers.pipelines import FluxControlNetPipeline
+ # # from gradio_imageslider import ImageSlider
+ # # from PIL import Image
+ # # from huggingface_hub import snapshot_download
+
+ # # css = """
+ # # #col-container {
+ # #     margin: 0 auto;
+ # #     max-width: 512px;
+ # # }
+ # # """
+
+ # # if torch.cuda.is_available():
+ # #     power_device = "GPU"
+ # #     device = "cuda"
+ # # else:
+ # #     power_device = "CPU"
+ # #     device = "cpu"
+
+
+ # # huggingface_token = os.getenv("HUGGINFACE_TOKEN")
+
+ # # model_path = snapshot_download(
+ # #     repo_id="black-forest-labs/FLUX.1-dev",
+ # #     repo_type="model",
+ # #     ignore_patterns=["*.md", "*..gitattributes"],
+ # #     local_dir="FLUX.1-dev",
+ # #     token=huggingface_token, # type a new token-id.
+ # # )
+
+
+ # # # Load pipeline
+ # # controlnet = FluxControlNetModel.from_pretrained(
+ # #     "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
+ # # ).to(device)
+ # # pipe = FluxControlNetPipeline.from_pretrained(
+ # #     model_path, controlnet=controlnet, torch_dtype=torch.bfloat16
+ # # )
+ # # pipe.to(device)
+
+ # # MAX_SEED = 1000000
+ # # MAX_PIXEL_BUDGET = 1024 * 1024
+
+
+ # # def process_input(input_image, upscale_factor, **kwargs):
+ # #     w, h = input_image.size
+ # #     w_original, h_original = w, h
+ # #     aspect_ratio = w / h
+
+ # #     was_resized = False
+
+ # #     if w * h * upscale_factor**2 > MAX_PIXEL_BUDGET:
+ # #         warnings.warn(
+ # #             f"Requested output image is too large ({w * upscale_factor}x{h * upscale_factor}). Resizing to ({int(aspect_ratio * MAX_PIXEL_BUDGET ** 0.5 // upscale_factor), int(MAX_PIXEL_BUDGET ** 0.5 // aspect_ratio // upscale_factor)}) pixels."
+ # #         )
+ # #         gr.Info(
+ # #             f"Requested output image is too large ({w * upscale_factor}x{h * upscale_factor}). Resizing input to ({int(aspect_ratio * MAX_PIXEL_BUDGET ** 0.5 // upscale_factor), int(MAX_PIXEL_BUDGET ** 0.5 // aspect_ratio // upscale_factor)}) pixels budget."
+ # #         )
+ # #         input_image = input_image.resize(
+ # #             (
+ # #                 int(aspect_ratio * MAX_PIXEL_BUDGET**0.5 // upscale_factor),
+ # #                 int(MAX_PIXEL_BUDGET**0.5 // aspect_ratio // upscale_factor),
+ # #             )
+ # #         )
+ # #         was_resized = True
+
+ # #     # resize to multiple of 8
+ # #     w, h = input_image.size
+ # #     w = w - w % 8
+ # #     h = h - h % 8
+
+ # #     return input_image.resize((w, h)), w_original, h_original, was_resized
+
+
+ # # @spaces.GPU#(duration=42)
+ # # def infer(
+ # #     seed,
+ # #     randomize_seed,
+ # #     input_image,
+ # #     num_inference_steps,
+ # #     upscale_factor,
+ # #     controlnet_conditioning_scale,
+ # #     progress=gr.Progress(track_tqdm=True),
+ # # ):
+ # #     if randomize_seed:
+ # #         seed = random.randint(0, MAX_SEED)
+ # #     true_input_image = input_image
+ # #     input_image, w_original, h_original, was_resized = process_input(
+ # #         input_image, upscale_factor
+ # #     )
+
+ # #     # rescale with upscale factor
+ # #     w, h = input_image.size
+ # #     control_image = input_image.resize((w * upscale_factor, h * upscale_factor))
+
+ # #     generator = torch.Generator().manual_seed(seed)
+
+ # #     gr.Info("Upscaling image...")
+ # #     image = pipe(
+ # #         prompt="",
+ # #         control_image=control_image,
+ # #         controlnet_conditioning_scale=controlnet_conditioning_scale,
+ # #         num_inference_steps=num_inference_steps,
+ # #         guidance_scale=3.5,
+ # #         height=control_image.size[1],
+ # #         width=control_image.size[0],
+ # #         generator=generator,
+ # #     ).images[0]
+
+ # #     if was_resized:
+ # #         gr.Info(
+ # #             f"Resizing output image to targeted {w_original * upscale_factor}x{h_original * upscale_factor} size."
+ # #         )
+
+ # #     # resize to target desired size
+ # #     image = image.resize((w_original * upscale_factor, h_original * upscale_factor))
+ # #     image.save("output.jpg")
+ # #     # convert to numpy
+ # #     return [true_input_image, image, seed]
+
+
+ # # with gr.Blocks(css=css) as demo:
+ # #     # with gr.Column(elem_id="col-container"):
+ # #     gr.Markdown(
+ # #         f"""
+ # #     # ⚡ Flux.1-dev Upscaler ControlNet ⚡
+ # #     This is an interactive demo of [Flux.1-dev Upscaler ControlNet](https://huggingface.co/jasperai/Flux.1-dev-Controlnet-Upscaler) taking as input a low resolution image to generate a high resolution image.
+ # #     Currently running on {power_device}.
+
+ # #     *Note*: Even though the model can handle higher resolution images, due to GPU memory constraints, this demo was limited to a generated output not exceeding a pixel budget of 1024x1024. If the requested size exceeds that limit, the input will be first resized keeping the aspect ratio such that the output of the controlNet model does not exceed the allocated pixel budget. The output is then resized to the targeted shape using a simple resizing. This may explain some artifacts for high resolution input. To adress this, run the demo locally or consider implementing a tiling strategy. Happy upscaling! 🚀
+ # #     """
+ # #     )
+
+ # #     with gr.Row():
+ # #         run_button = gr.Button(value="Run")
+
+ # #     with gr.Row():
+ # #         with gr.Column(scale=4):
+ # #             input_im = gr.Image(label="Input Image", type="pil")
+ # #         with gr.Column(scale=1):
+ # #             num_inference_steps = gr.Slider(
+ # #                 label="Number of Inference Steps",
+ # #                 minimum=8,
+ # #                 maximum=50,
+ # #                 step=1,
+ # #                 value=28,
+ # #             )
+ # #             upscale_factor = gr.Slider(
+ # #                 label="Upscale Factor",
+ # #                 minimum=1,
+ # #                 maximum=4,
+ # #                 step=1,
+ # #                 value=4,
+ # #             )
+ # #             controlnet_conditioning_scale = gr.Slider(
+ # #                 label="Controlnet Conditioning Scale",
+ # #                 minimum=0.1,
+ # #                 maximum=1.5,
+ # #                 step=0.1,
+ # #                 value=0.6,
+ # #             )
+ # #             seed = gr.Slider(
+ # #                 label="Seed",
+ # #                 minimum=0,
+ # #                 maximum=MAX_SEED,
+ # #                 step=1,
+ # #                 value=42,
+ # #             )
+
+ # #             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+ # #     with gr.Row():
+ # #         result = ImageSlider(label="Input / Output", type="pil", interactive=True)
+
+ # #     examples = gr.Examples(
+ # #         examples=[
+ # #             # [42, False, "examples/image_1.jpg", 28, 4, 0.6],
+ # #             [42, False, "examples/image_2.jpg", 28, 4, 0.6],
+ # #             # [42, False, "examples/image_3.jpg", 28, 4, 0.6],
+ # #             [42, False, "examples/image_4.jpg", 28, 4, 0.6],
+ # #             # [42, False, "examples/image_5.jpg", 28, 4, 0.6],
+ # #             # [42, False, "examples/image_6.jpg", 28, 4, 0.6],
+ # #         ],
+ # #         inputs=[
+ # #             seed,
+ # #             randomize_seed,
+ # #             input_im,
+ # #             num_inference_steps,
+ # #             upscale_factor,
+ # #             controlnet_conditioning_scale,
+ # #         ],
+ # #         fn=infer,
+ # #         outputs=result,
+ # #         cache_examples="lazy",
+ # #     )
+
+ # #     # examples = gr.Examples(
+ # #     #     examples=[
+ # #     #         #[42, False, "examples/image_1.jpg", 28, 4, 0.6],
+ # #     #         [42, False, "examples/image_2.jpg", 28, 4, 0.6],
+ # #     #         #[42, False, "examples/image_3.jpg", 28, 4, 0.6],
+ # #     #         #[42, False, "examples/image_4.jpg", 28, 4, 0.6],
+ # #     #         [42, False, "examples/image_5.jpg", 28, 4, 0.6],
+ # #     #         [42, False, "examples/image_6.jpg", 28, 4, 0.6],
+ # #     #         [42, False, "examples/image_7.jpg", 28, 4, 0.6],
+ # #     #     ],
+ # #     #     inputs=[
+ # #     #         seed,
+ # #     #         randomize_seed,
+ # #     #         input_im,
+ # #     #         num_inference_steps,
+ # #     #         upscale_factor,
+ # #     #         controlnet_conditioning_scale,
+ # #     #     ],
+ # #     # )
+
+ # #     gr.Markdown("**Disclaimer:**")
+ # #     gr.Markdown(
+ # #         "This demo is only for research purpose. Jasper cannot be held responsible for the generation of NSFW (Not Safe For Work) content through the use of this demo. Users are solely responsible for any content they create, and it is their obligation to ensure that it adheres to appropriate and ethical standards. Jasper provides the tools, but the responsibility for their use lies with the individual user."
+ # #     )
+ # #     gr.on(
+ # #         [run_button.click],
+ # #         fn=infer,
+ # #         inputs=[
+ # #             seed,
+ # #             randomize_seed,
+ # #             input_im,
+ # #             num_inference_steps,
+ # #             upscale_factor,
+ # #             controlnet_conditioning_scale,
+ # #         ],
+ # #         outputs=result,
+ # #         show_api=False,
+ # #         # show_progress="minimal",
+ # #     )
+
+ # # demo.queue().launch(share=False, show_api=False)
+
+
+
+
+
  # import logging
  # import random
  # import warnings
+ # import os,shutil,subprocess
  # import torch
+ # import numpy as np
  # from diffusers import FluxControlNetModel
  # from diffusers.pipelines import FluxControlNetPipeline
  # from PIL import Image
+ # from huggingface_hub import snapshot_download,login
+ # import io
+ # import base64
+ # from flask import Flask, request, jsonify
+ # from concurrent.futures import ThreadPoolExecutor
+ # from flask_cors import CORS
+ # from tqdm import tqdm
+
+ # app = Flask(__name__)
+ # CORS(app)
+
+ # # Function to check disk usage
+ # def check_disk_space():
+ #     result = subprocess.run(['df', '-h'], capture_output=True, text=True)
+ #     print(result.stdout)
+
+ # # Function to clear Hugging Face cache
+ # def clear_huggingface_cache():
+ #     cache_dir = os.path.expanduser('~/.cache/huggingface')
+ #     if os.path.exists(cache_dir):
+ #         shutil.rmtree(cache_dir)  # Removes the entire cache directory
+ #         print(f"Cleared Hugging Face cache at: {cache_dir}")
+ #     else:
+ #         print("No Hugging Face cache found.")
+
+ # # Check disk space
+ # check_disk_space()
+
+ # # Clear Hugging Face cache
+ # clear_huggingface_cache()
+
+ # # Add config to store base64 images
+ # app.config['image_outputs'] = {}
+
+ # # ThreadPoolExecutor for managing image processing threads
+ # executor = ThreadPoolExecutor()
+
+ # # Determine the device (GPU or CPU)
  # if torch.cuda.is_available():
  #     device = "cuda"
  # else:
  #     device = "cpu"

+ # # Load model from Huggingface Hub
+ # huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
+ # if huggingface_token:
+ #     login(token=huggingface_token)
+ # else:
+ #     print("Hugging Face token not found in environment variables.")
+ # print(huggingface_token)
+ # with tqdm(total=100, desc="Downloading model", bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}") as pbar:
+ #     model_path = snapshot_download(
  #         repo_id="black-forest-labs/FLUX.1-dev",
  #         repo_type="model",
  #         ignore_patterns=["*.md", "*..gitattributes"],
  #         local_dir="FLUX.1-dev",
+ #         token=huggingface_token)

  # # Load pipeline
+ # print('controlnet enters')
+ # with tqdm(total=100, desc="Downloading model", bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}") as pbar:
+ #     controlnet = FluxControlNetModel.from_pretrained(
  #         "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
  #     ).to(device)
+ # print('controlnet exits')
+ # print('pipe enters')
+ # with tqdm(total=100, desc="Downloading model", bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}") as pbar:
+ #     pipe = FluxControlNetPipeline.from_pretrained(
  #         model_path, controlnet=controlnet, torch_dtype=torch.bfloat16
+ #     ).to(device)
+ #     # pipe.to(device)
+ # print('pipe exits')

  # MAX_SEED = 1000000
  # MAX_PIXEL_BUDGET = 1024 * 1024

+ # def process_input(input_image, upscale_factor):
  #     w, h = input_image.size
  #     aspect_ratio = w / h
  #     was_resized = False

+ #     # Resize if input size exceeds the maximum pixel budget
  #     if w * h * upscale_factor**2 > MAX_PIXEL_BUDGET:
+ #         warnings.warn(f"Requested output image is too large. Resizing to fit within pixel budget.")
  #         input_image = input_image.resize(
  #             (
  #                 int(aspect_ratio * MAX_PIXEL_BUDGET**0.5 // upscale_factor),
  #             )
  #         was_resized = True

+ #     # Adjust dimensions to be a multiple of 8
  #     w, h = input_image.size
  #     w = w - w % 8
  #     h = h - h % 8

+ #     return input_image.resize((w, h)), was_resized

+ # def run_inference(process_id, input_image, upscale_factor, seed, num_inference_steps, controlnet_conditioning_scale):
+ #     input_image, was_resized = process_input(input_image, upscale_factor)

+ #     # Rescale image for ControlNet processing
  #     w, h = input_image.size
  #     control_image = input_image.resize((w * upscale_factor, h * upscale_factor))

+ #     # Set the random generator for inference
  #     generator = torch.Generator().manual_seed(seed)

+ #     # Perform inference using the pipeline
  #     image = pipe(
  #         prompt="",
  #         control_image=control_image,
  #         generator=generator,
  #     ).images[0]

+ #     # Resize output image back to the original dimensions if needed
  #     if was_resized:
+ #         original_size = (input_image.width * upscale_factor, input_image.height * upscale_factor)
+ #         image = image.resize(original_size)
+
+ #     # Convert the output image to base64
+ #     buffered = io.BytesIO()
+ #     image.save(buffered, format="JPEG")
+ #     image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
+
+ #     # Store the result in the shared dictionary
+ #     app.config['image_outputs'][process_id] = image_base64
+
+ # @app.route('/infer', methods=['POST'])
+ # def infer():
+ #     data = request.json
+ #     seed = data.get("seed", 42)
+ #     randomize_seed = data.get("randomize_seed", True)
+ #     num_inference_steps = data.get("num_inference_steps", 28)
+ #     upscale_factor = data.get("upscale_factor", 4)
+ #     controlnet_conditioning_scale = data.get("controlnet_conditioning_scale", 0.6)
+
+ #     # Randomize seed if specified
+ #     if randomize_seed:
+ #         seed = random.randint(0, MAX_SEED)

+ #     # Load and process the input image
+ #     input_image_data = base64.b64decode(data['input_image'])
+ #     input_image = Image.open(io.BytesIO(input_image_data))
+
+ #     # Create a unique process ID for this request
+ #     process_id = str(random.randint(1000, 9999))
+
+ #     # Set the status to 'in_progress'
+ #     app.config['image_outputs'][process_id] = None
+
+ #     # Run the inference in a separate thread
+ #     executor.submit(run_inference, process_id, input_image, upscale_factor, seed, num_inference_steps, controlnet_conditioning_scale)

+ #     # Return the process ID
+ #     return jsonify({
+ #         "process_id": process_id,
+ #         "message": "Processing started"
+ #     })

+ # # Modify status endpoint to receive process_id in request body
+ # @app.route('/status', methods=['POST'])
+ # def status():
+ #     data = request.json
+ #     process_id = data.get('process_id')
+
+ #     # Check if process_id was provided
+ #     if not process_id:
+ #         return jsonify({
+ #             "status": "error",
+ #             "message": "Process ID is required"
+ #         }), 400
+
+ #     # Check if the process_id exists in the dictionary
+ #     if process_id not in app.config['image_outputs']:
+ #         return jsonify({
+ #             "status": "error",
+ #             "message": "Invalid process ID"
+ #         }), 404
+
+ #     # Check the status of the image processing
+ #     image_base64 = app.config['image_outputs'][process_id]
+ #     if image_base64 is None:
+ #         return jsonify({
+ #             "status": "in_progress"
+ #         })
+ #     else:
+ #         return jsonify({
+ #             "status": "completed",
+ #             "output_image": image_base64
+ #         })
+
+ # if __name__ == '__main__':
+ #     app.run(debug=True)

+
+

  import logging
  import random
  import warnings
+ import os
+ import shutil
+ import subprocess
  import torch
  import numpy as np
  from diffusers import FluxControlNetModel
  from diffusers.pipelines import FluxControlNetPipeline
  from PIL import Image
+ from huggingface_hub import snapshot_download, login
  import io
  import base64
  from flask import Flask, request, jsonify

  from flask_cors import CORS
  from tqdm import tqdm

+ # Configure logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
  app = Flask(__name__)
  CORS(app)

  # Function to check disk usage
  def check_disk_space():
      result = subprocess.run(['df', '-h'], capture_output=True, text=True)
+     logger.info("Disk space usage:\n%s", result.stdout)

  # Function to clear Hugging Face cache
  def clear_huggingface_cache():
      cache_dir = os.path.expanduser('~/.cache/huggingface')
      if os.path.exists(cache_dir):
          shutil.rmtree(cache_dir)  # Removes the entire cache directory
+         logger.info("Cleared Hugging Face cache at: %s", cache_dir)
      else:
+         logger.info("No Hugging Face cache found.")

  # Check disk space
  check_disk_space()

  # Determine the device (GPU or CPU)
  if torch.cuda.is_available():
      device = "cuda"
+     logger.info("CUDA is available. Using GPU.")
  else:
      device = "cpu"
+     logger.info("CUDA is not available. Using CPU.")

  # Load model from Huggingface Hub
  huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
  if huggingface_token:
      login(token=huggingface_token)
+     logger.info("Hugging Face token found and logged in.")
  else:
+     logger.warning("Hugging Face token not found in environment variables.")
+
+ logger.info("Hugging Face token: %s", huggingface_token)
+
+ # Download model using snapshot_download
  with tqdm(total=100, desc="Downloading model", bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}") as pbar:
+     model_path = snapshot_download(
+         repo_id="black-forest-labs/FLUX.1-dev",
+         repo_type="model",
+         ignore_patterns=["*.md", "*..gitattributes"],
+         local_dir="FLUX.1-dev",
+         token=huggingface_token)
+ logger.info("Model downloaded to: %s", model_path)

  # Load pipeline
+ logger.info('Loading ControlNet model.')
+ with tqdm(total=100, desc="Downloading ControlNet model", bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}") as pbar:
+     controlnet = FluxControlNetModel.from_pretrained(
+         "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
+     ).to(device)
+ logger.info("ControlNet model loaded successfully.")
+
+ logger.info('Loading pipeline.')
+ with tqdm(total=100, desc="Downloading pipeline", bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}") as pbar:
+     pipe = FluxControlNetPipeline.from_pretrained(
+         model_path, controlnet=controlnet, torch_dtype=torch.bfloat16
+     ).to(device)
+ logger.info("Pipeline loaded successfully.")

  MAX_SEED = 1000000
  MAX_PIXEL_BUDGET = 1024 * 1024
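
For context on the constant above: MAX_PIXEL_BUDGET caps the generated output at 1024x1024 pixels, and process_input shrinks the input first whenever the requested output would exceed it. A worked example of that arithmetic, assuming a 1024x1024 input and the default upscale_factor of 4 (illustrative values only, not part of app.py):

# Sketch of the MAX_PIXEL_BUDGET check, assuming a 1024x1024 input
# and upscale_factor=4 (same formulas as process_input).
MAX_PIXEL_BUDGET = 1024 * 1024
w, h, upscale_factor = 1024, 1024, 4
aspect_ratio = w / h  # 1.0

# The requested output would be 4096x4096 = 16,777,216 pixels: over budget.
over_budget = w * h * upscale_factor**2 > MAX_PIXEL_BUDGET  # True

# So the input is shrunk before upscaling:
new_w = int(aspect_ratio * MAX_PIXEL_BUDGET**0.5 // upscale_factor)   # 1024 // 4 = 256
new_h = int(MAX_PIXEL_BUDGET**0.5 // aspect_ratio // upscale_factor)  # 1024 // 1 // 4 = 256
# After the 4x upscale the output is 1024x1024, exactly at the budget.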
 
      return input_image.resize((w, h)), was_resized

  def run_inference(process_id, input_image, upscale_factor, seed, num_inference_steps, controlnet_conditioning_scale):
+     logger.info("Processing inference for process_id: %s", process_id)
      input_image, was_resized = process_input(input_image, upscale_factor)

      # Rescale image for ControlNet processing

      generator = torch.Generator().manual_seed(seed)

      # Perform inference using the pipeline
+     logger.info("Running pipeline for process_id: %s", process_id)
      image = pipe(
          prompt="",
          control_image=control_image,

      # Store the result in the shared dictionary
      app.config['image_outputs'][process_id] = image_base64
+     logger.info("Inference completed for process_id: %s", process_id)

  @app.route('/infer', methods=['POST'])
  def infer():

      # Randomize seed if specified
      if randomize_seed:
          seed = random.randint(0, MAX_SEED)
+         logger.info("Seed randomized to: %d", seed)

      # Load and process the input image
      input_image_data = base64.b64decode(data['input_image'])

      # Create a unique process ID for this request
      process_id = str(random.randint(1000, 9999))
+     logger.info("Process started with process_id: %s", process_id)

      # Set the status to 'in_progress'
      app.config['image_outputs'][process_id] = None

      # Check if process_id was provided
      if not process_id:
+         logger.error("Process ID not provided in request.")
          return jsonify({
              "status": "error",
              "message": "Process ID is required"

      # Check if the process_id exists in the dictionary
      if process_id not in app.config['image_outputs']:
+         logger.error("Invalid process ID: %s", process_id)
          return jsonify({
              "status": "error",
              "message": "Invalid process ID"

      # Check the status of the image processing
      image_base64 = app.config['image_outputs'][process_id]
      if image_base64 is None:
+         logger.info("Process ID %s is still in progress.", process_id)
          return jsonify({
              "status": "in_progress"
          })
      else:
+         logger.info("Process ID %s completed successfully.", process_id)
          return jsonify({
              "status": "completed",
              "output_image": image_base64

  if __name__ == '__main__':
      app.run(debug=True)

+
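
For reference, a minimal client sketch for the new asynchronous API: POST a base64-encoded image to /infer, then poll /status with the returned process_id until the job completes. The base URL (Flask's default host and port under app.run) and the requests dependency are assumptions, not part of this commit; the field names and defaults match the endpoints above.

import base64
import time

import requests  # assumed third-party dependency; not used by app.py itself

BASE = "http://127.0.0.1:5000"  # Flask's default bind address for app.run()

# Encode a local image (hypothetical filename) and submit the job.
with open("low_res.jpg", "rb") as f:
    payload = {
        "input_image": base64.b64encode(f.read()).decode("utf-8"),
        "seed": 42,
        "randomize_seed": False,
        "num_inference_steps": 28,
        "upscale_factor": 4,
        "controlnet_conditioning_scale": 0.6,
    }
process_id = requests.post(f"{BASE}/infer", json=payload).json()["process_id"]

# Poll until /status reports completion, then decode the base64 JPEG result.
while True:
    status = requests.post(f"{BASE}/status", json={"process_id": process_id}).json()
    if status["status"] == "completed":
        with open("upscaled.jpg", "wb") as out:
            out.write(base64.b64decode(status["output_image"]))
        break
    time.sleep(2)  # arbitrary polling interval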