Nymbo committed
Commit b4abd41
1 Parent(s): 9826117

Update app.py

Files changed (1)
  1. app.py +50 -66
app.py CHANGED
@@ -12,7 +12,6 @@ from gradio_imageslider import ImageSlider
 from PIL import Image
 from huggingface_hub import snapshot_download
 
-# Define custom CSS styling for Gradio blocks
 css = """
 #col-container {
     margin: 0 auto;
@@ -20,69 +19,52 @@ css = """
 }
 """
 
-# Determine whether GPU is available, and set the device accordingly
 if torch.cuda.is_available():
     power_device = "GPU"
     device = "cuda"
-    print("GPU is available. Using CUDA.")
 else:
     power_device = "CPU"
     device = "cpu"
-    print("GPU is not available. Using CPU.")
 
-# Get Hugging Face token from environment variables
-huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
-print(f"Hugging Face token retrieved: {huggingface_token is not None}")
 
-# Download the model from the Hugging Face Hub
-print("Downloading model from Hugging Face Hub...")
+huggingface_token = os.getenv("HUGGINFACE_TOKEN")
+
 model_path = snapshot_download(
-    repo_id="black-forest-labs/FLUX.1-dev",
-    repo_type="model",
+    repo_id="black-forest-labs/FLUX.1-dev",
+    repo_type="model",
     ignore_patterns=["*.md", "*..gitattributes"],
     local_dir="FLUX.1-dev",
-    token=huggingface_token,
+    token=huggingface_token, # type a new token-id.
 )
-print(f"Model downloaded to: {model_path}")
 
-# Load ControlNet model
-print("Loading ControlNet model...")
+
+# Load pipeline
 controlnet = FluxControlNetModel.from_pretrained(
     "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
 ).to(device)
-print("ControlNet model loaded.")
-
-# Load the pipeline using the downloaded model and ControlNet
-print("Loading FluxControlNetPipeline...")
 pipe = FluxControlNetPipeline.from_pretrained(
     model_path, controlnet=controlnet, torch_dtype=torch.bfloat16
 )
 pipe.to(device)
-print("Pipeline loaded.")
 
-# Define constants for seed generation and maximum pixel budget
 MAX_SEED = 1000000
 MAX_PIXEL_BUDGET = 1024 * 1024
 
-# Function to process input image before upscaling
+
 def process_input(input_image, upscale_factor, **kwargs):
-    print(f"Processing input image with upscale factor: {upscale_factor}")
     w, h = input_image.size
     w_original, h_original = w, h
     aspect_ratio = w / h
 
     was_resized = False
 
-    # Resize the input image if the output image would exceed the pixel budget
     if w * h * upscale_factor**2 > MAX_PIXEL_BUDGET:
         warnings.warn(
             f"Requested output image is too large ({w * upscale_factor}x{h * upscale_factor}). Resizing to ({int(aspect_ratio * MAX_PIXEL_BUDGET ** 0.5 // upscale_factor), int(MAX_PIXEL_BUDGET ** 0.5 // aspect_ratio // upscale_factor)}) pixels."
         )
-        print("Input image is too large, resizing...")
         gr.Info(
             f"Requested output image is too large ({w * upscale_factor}x{h * upscale_factor}). Resizing input to ({int(aspect_ratio * MAX_PIXEL_BUDGET ** 0.5 // upscale_factor), int(MAX_PIXEL_BUDGET ** 0.5 // aspect_ratio // upscale_factor)}) pixels budget."
         )
-        # Resize the input image to fit within the maximum pixel budget
         input_image = input_image.resize(
             (
                 int(aspect_ratio * MAX_PIXEL_BUDGET**0.5 // upscale_factor),
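Note: FLUX.1-dev is a gated repository, so the snapshot_download call above only succeeds when the token the Space reads is actually set, and the new code reads the variable as spelled HUGGINFACE_TOKEN (carried over from the upstream jasperai Space). A hypothetical guard, not part of this commit, that makes a missing secret fail loudly:

    import os
    import warnings

    # The Space secret must use the exact name the code reads, misspelling included.
    huggingface_token = os.getenv("HUGGINFACE_TOKEN")
    if huggingface_token is None:
        # snapshot_download would otherwise fail on the gated FLUX.1-dev repo
        warnings.warn("HUGGINFACE_TOKEN is not set; the gated download will fail.")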
@@ -90,18 +72,16 @@ def process_input(input_image, upscale_factor, **kwargs):
             )
         )
         was_resized = True
-        print(f"Image resized to: {input_image.size}")
 
-    # Ensure that the dimensions are multiples of 8 (required by the model)
+    # resize to multiple of 8
     w, h = input_image.size
     w = w - w % 8
     h = h - h % 8
-    print(f"Resizing image to be multiple of 8: ({w}, {h})")
 
     return input_image.resize((w, h)), w_original, h_original, was_resized
 
-# Define inference function with GPU duration hint
-@spaces.GPU(duration=42)
+
+@spaces.GPU#(duration=42)
 def infer(
     seed,
     randomize_seed,
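For concreteness, here is roughly what process_input computes for a 1024x768 input at 4x, a worked sketch using the same formulas and MAX_PIXEL_BUDGET = 1024 * 1024 (the numbers are illustrative, not part of the commit):

    w, h, upscale_factor = 1024, 768, 4
    MAX_PIXEL_BUDGET = 1024 * 1024
    aspect_ratio = w / h  # ~1.333

    # 1024 * 768 * 4**2 = 12,582,912 pixels, well over the 1,048,576 budget,
    # so the input is shrunk before upscaling:
    new_w = int(aspect_ratio * MAX_PIXEL_BUDGET**0.5 // upscale_factor)   # 341
    new_h = int(MAX_PIXEL_BUDGET**0.5 // aspect_ratio // upscale_factor)  # 192
    # rounding both down to multiples of 8 gives the working size
    print(new_w - new_w % 8, new_h - new_h % 8)  # 336 192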
@@ -111,109 +91,96 @@ def infer(
     controlnet_conditioning_scale,
     progress=gr.Progress(track_tqdm=True),
 ):
-    print(f"Starting inference with seed: {seed}, randomize_seed: {randomize_seed}")
-    # Randomize the seed if the option is selected
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
-        print(f"Randomized seed: {seed}")
     true_input_image = input_image
-    # Process the input image for upscaling
     input_image, w_original, h_original, was_resized = process_input(
         input_image, upscale_factor
     )
-    print(f"Processed input image. Original size: ({w_original}, {h_original}), Processed size: {input_image.size}")
 
-    # Rescale the input image by the upscale factor
+    # rescale with upscale factor
     w, h = input_image.size
     control_image = input_image.resize((w * upscale_factor, h * upscale_factor))
-    print(f"Control image resized to: {control_image.size}")
 
-    # Create a random number generator with the provided seed
     generator = torch.Generator().manual_seed(seed)
 
     gr.Info("Upscaling image...")
-    print("Running the pipeline to generate output image...")
-    # Run the pipeline to generate the output image
     image = pipe(
-        prompt="", # No specific prompt is used here
+        prompt="",
         control_image=control_image,
         controlnet_conditioning_scale=controlnet_conditioning_scale,
         num_inference_steps=num_inference_steps,
-        guidance_scale=3.5, # Guidance scale for image generation
+        guidance_scale=3.5,
         height=control_image.size[1],
         width=control_image.size[0],
         generator=generator,
     ).images[0]
-    print("Image generation completed.")
 
-    # If the image was resized during processing, resize it back to the original target size
     if was_resized:
         gr.Info(
             f"Resizing output image to targeted {w_original * upscale_factor}x{h_original * upscale_factor} size."
         )
-        print(f"Resizing output image to original target size: ({w_original * upscale_factor}, {h_original * upscale_factor})")
 
-    # Resize the generated image to the desired output size
+    # resize to target desired size
     image = image.resize((w_original * upscale_factor, h_original * upscale_factor))
-    print(f"Final output image size: {image.size}")
     image.save("output.jpg")
-    print("Output image saved as 'output.jpg'")
-    # Return the original input image, generated image, and seed value
+    # convert to numpy
     return [true_input_image, image, seed]
 
-# Create the Gradio interface
+
 with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
+    # with gr.Column(elem_id="col-container"):
     gr.HTML("<center><h1>FLUX.1-Dev Upscaler</h1></center>")
 
-    # Define the button to start the upscaling process
     with gr.Row():
         run_button = gr.Button(value="Run")
 
-    # Define the input elements for the upscaling parameters
     with gr.Row():
         with gr.Column(scale=4):
-            input_im = gr.Image(label="Input Image", type="pil") # Input image
+            input_im = gr.Image(label="Input Image", type="pil")
         with gr.Column(scale=1):
             num_inference_steps = gr.Slider(
-                label="Number of Inference Steps", # Slider to set the number of inference steps
+                label="Number of Inference Steps",
                 minimum=8,
                 maximum=50,
                 step=1,
                 value=28,
             )
             upscale_factor = gr.Slider(
-                label="Upscale Factor", # Slider to set the upscale factor
+                label="Upscale Factor",
                 minimum=1,
                 maximum=4,
                 step=1,
                 value=4,
             )
             controlnet_conditioning_scale = gr.Slider(
-                label="Controlnet Conditioning Scale", # Slider for controlnet conditioning scale
+                label="Controlnet Conditioning Scale",
                 minimum=0.1,
                 maximum=1.5,
                 step=0.1,
                 value=0.6,
             )
             seed = gr.Slider(
-                label="Seed", # Slider to set the random seed
+                label="Seed",
                 minimum=0,
                 maximum=MAX_SEED,
                 step=1,
                 value=42,
             )
 
-    randomize_seed = gr.Checkbox(label="Randomize seed", value=True) # Checkbox to randomize the seed
+    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
 
-    # Define the output element to display the input and output images
     with gr.Row():
         result = ImageSlider(label="Input / Output", type="pil", interactive=True)
 
-    # Define examples for users to try out
     examples = gr.Examples(
         examples=[
+            # [42, False, "examples/image_1.jpg", 28, 4, 0.6],
            [42, False, "examples/image_2.jpg", 28, 4, 0.6],
+            # [42, False, "examples/image_3.jpg", 28, 4, 0.6],
            [42, False, "examples/image_4.jpg", 28, 4, 0.6],
+            # [42, False, "examples/image_5.jpg", 28, 4, 0.6],
+            # [42, False, "examples/image_6.jpg", 28, 4, 0.6],
        ],
        inputs=[
            seed,
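Stripped of the Gradio plumbing, the core call in infer is unchanged by this commit; outside the UI the same upscaling step reduces to roughly this sketch (it assumes pipe is loaded as above and that a local input.jpg exists, both hypothetical here):

    import torch
    from PIL import Image

    image = Image.open("input.jpg")  # hypothetical test image
    w, h = image.size
    control_image = image.resize((w * 4, h * 4))  # 4x upscale target

    upscaled = pipe(
        prompt="",
        control_image=control_image,
        controlnet_conditioning_scale=0.6,
        num_inference_steps=28,
        guidance_scale=3.5,
        height=control_image.size[1],
        width=control_image.size[0],
        generator=torch.Generator().manual_seed(42),
    ).images[0]
    upscaled.save("output.jpg")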
@@ -223,12 +190,31 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
             upscale_factor,
             controlnet_conditioning_scale,
         ],
-        fn=infer, # Function to call for the examples
+        fn=infer,
         outputs=result,
         cache_examples="lazy",
     )
 
-    # Define the action for the run button
+    # examples = gr.Examples(
+    #     examples=[
+    #         #[42, False, "examples/image_1.jpg", 28, 4, 0.6],
+    #         [42, False, "examples/image_2.jpg", 28, 4, 0.6],
+    #         #[42, False, "examples/image_3.jpg", 28, 4, 0.6],
+    #         #[42, False, "examples/image_4.jpg", 28, 4, 0.6],
+    #         [42, False, "examples/image_5.jpg", 28, 4, 0.6],
+    #         [42, False, "examples/image_6.jpg", 28, 4, 0.6],
+    #         [42, False, "examples/image_7.jpg", 28, 4, 0.6],
+    #     ],
+    #     inputs=[
+    #         seed,
+    #         randomize_seed,
+    #         input_im,
+    #         num_inference_steps,
+    #         upscale_factor,
+    #         controlnet_conditioning_scale,
+    #     ],
+    # )
+
     gr.on(
         [run_button.click],
         fn=infer,
@@ -242,9 +228,7 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
         ],
         outputs=result,
         show_api=False,
+        # show_progress="minimal",
     )
 
-# Launch the Gradio app
-# The queue is used to handle multiple requests, sharing is disabled for privacy
-print("Launching Gradio app...")
 demo.queue().launch(share=False, show_api=False)
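One behavioral change worth noting: in `@spaces.GPU#(duration=42)` the duration hint is commented out, so the decorator now runs as the bare form and the spaces package's default ZeroGPU allocation window applies instead of the explicit 42-second hint. Both forms are accepted by the spaces package; a minimal illustration (assuming spaces is installed, as on a ZeroGPU Space):

    import spaces

    @spaces.GPU(duration=42)  # before: explicit per-call duration hint, in seconds
    def infer_before(x):
        return x

    @spaces.GPU  # after: bare decorator; the package default duration applies
    def infer_after(x):
        return x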
 