SkalskiP committed on
Commit 3dc751e
1 Parent(s): 40b82c8

test no HF_TOKEN solution

Files changed (1)
  1. app.py +56 -59
app.py CHANGED
@@ -1,10 +1,8 @@
+import random
 from typing import Tuple

-import os
-import requests
-import random
-import numpy as np
 import gradio as gr
+import numpy as np
 import spaces
 import torch
 from PIL import Image, ImageFilter
@@ -23,8 +21,7 @@ MAX_SEED = np.iinfo(np.int32).max
 IMAGE_SIZE = 1024
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

-HF_TOKEN = os.environ.get("HF_TOKEN", None)
-client = Client("SkalskiP/florence-sam-masking", hf_token=HF_TOKEN)
+client = Client("SkalskiP/florence-sam-masking")


 def remove_background(image: Image.Image, threshold: int = 50) -> Image.Image:
@@ -42,34 +39,34 @@ def remove_background(image: Image.Image, threshold: int = 50) -> Image.Image:
     return image


-EXAMPLES = [
-    [
-        {
-            "background": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-image.png", stream=True).raw),
-            "layers": [remove_background(Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-mask-2.png", stream=True).raw))],
-            "composite": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-composite-2.png", stream=True).raw),
-        },
-        "little lion",
-        None,
-        42,
-        False,
-        0.85,
-        30
-    ],
-    [
-        {
-            "background": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-image.png", stream=True).raw),
-            "layers": [remove_background(Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-mask-3.png", stream=True).raw))],
-            "composite": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-composite-3.png", stream=True).raw),
-        },
-        "tattoos",
-        None,
-        42,
-        False,
-        0.85,
-        30
-    ]
-]
+# EXAMPLES = [
+#     [
+#         {
+#             "background": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-image.png", stream=True).raw),
+#             "layers": [remove_background(Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-mask-2.png", stream=True).raw))],
+#             "composite": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-composite-2.png", stream=True).raw),
+#         },
+#         "little lion",
+#         None,
+#         42,
+#         False,
+#         0.85,
+#         30
+#     ],
+#     [
+#         {
+#             "background": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-image.png", stream=True).raw),
+#             "layers": [remove_background(Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-mask-3.png", stream=True).raw))],
+#             "composite": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-composite-3.png", stream=True).raw),
+#         },
+#         "tattoos",
+#         None,
+#         42,
+#         False,
+#         0.85,
+#         30
+#     ]
+# ]

 pipe = FluxInpaintPipeline.from_pretrained(
     "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16).to(DEVICE)
@@ -143,16 +140,16 @@ def process(

     mask = mask.filter(ImageFilter.GaussianBlur(radius=5))
     width, height = resize_image_dimensions(original_resolution_wh=image.size)
-    resized_image = image.resize((width, height), Image.LANCZOS)
-    resized_mask = mask.resize((width, height), Image.LANCZOS)
+    image = image.resize((width, height), Image.LANCZOS)
+    mask = mask.resize((width, height), Image.LANCZOS)

     if randomize_seed_checkbox:
         seed_slicer = random.randint(0, MAX_SEED)
     generator = torch.Generator().manual_seed(seed_slicer)
     result = pipe(
         prompt=inpainting_prompt_text,
-        image=resized_image,
-        mask_image=resized_mask,
+        image=image,
+        mask_image=mask,
         width=width,
         height=height,
         strength=strength_slider,
@@ -160,7 +157,7 @@
         num_inference_steps=num_inference_steps_slider
     ).images[0]
     print('INFERENCE DONE')
-    return result, resized_mask
+    return result, mask


 with gr.Blocks() as demo:
@@ -232,26 +229,26 @@ with gr.Blocks() as demo:
            with gr.Accordion("Debug", open=False):
                output_mask_component = gr.Image(
                    type='pil', image_mode='RGB', label='Input mask', format="png")
-    with gr.Row():
-        gr.Examples(
-            fn=process,
-            examples=EXAMPLES,
-            inputs=[
-                input_image_editor_component,
-                inpainting_prompt_text_component,
-                masking_prompt_text_component,
-                seed_slicer_component,
-                randomize_seed_checkbox_component,
-                strength_slider_component,
-                num_inference_steps_slider_component
-            ],
-            outputs=[
-                output_image_component,
-                output_mask_component
-            ],
-            run_on_click=True,
-            cache_examples=True
-        )
+    # with gr.Row():
+    #     gr.Examples(
+    #         fn=process,
+    #         examples=EXAMPLES,
+    #         inputs=[
+    #             input_image_editor_component,
+    #             inpainting_prompt_text_component,
+    #             masking_prompt_text_component,
+    #             seed_slicer_component,
+    #             randomize_seed_checkbox_component,
+    #             strength_slider_component,
+    #             num_inference_steps_slider_component
+    #         ],
+    #         outputs=[
+    #             output_image_component,
+    #             output_mask_component
+    #         ],
+    #         run_on_click=True,
+    #         cache_examples=True
+    #     )

    submit_button_component.click(
        fn=process,
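
Note on the token change: the commit connects to the "SkalskiP/florence-sam-masking" Space anonymously by dropping the hf_token argument from gradio_client.Client. If authenticated access is ever needed again (for example, if that Space becomes private), one way to keep both modes working is to pass a token only when one is present in the environment. The snippet below is a minimal sketch of that optional-token pattern; it is not part of this commit, and the fallback logic is an assumption rather than the Space's actual code.

import os

from gradio_client import Client

# Read HF_TOKEN if it is set; None means "connect anonymously",
# which is exactly what this commit tests.
hf_token = os.environ.get("HF_TOKEN")

if hf_token:
    # Authenticated connection (the pre-commit behaviour).
    client = Client("SkalskiP/florence-sam-masking", hf_token=hf_token)
else:
    # Anonymous connection (the post-commit behaviour).
    client = Client("SkalskiP/florence-sam-masking")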