ginipick committed on
Commit 1cd14fa • 1 Parent(s): e7fc396

Update app.py

Files changed (1)
  1. app.py +102 -193
app.py CHANGED
@@ -2,6 +2,8 @@ import tempfile
 import time
 from collections.abc import Sequence
 from typing import Any, cast
+import os
+from huggingface_hub import login
 
 import gradio as gr
 import numpy as np
@@ -15,21 +17,17 @@ from pymatting.foreground.estimate_foreground_ml import estimate_foreground_ml
 from refiners.fluxion.utils import no_grad
 from refiners.solutions import BoxSegmenter
 from transformers import GroundingDinoForObjectDetection, GroundingDinoProcessor
-
-import spaces
-import argparse
-import os
-from os import path
-import shutil
-from datetime import datetime
-from safetensors.torch import load_file
-from huggingface_hub import hf_hub_download
-import gradio as gr
 from diffusers import FluxPipeline
-from PIL import Image
-from huggingface_hub import login
 
-# HF token authentication handling
+BoundingBox = tuple[int, int, int, int]
+
+# Initialization and setup
+pillow_heif.register_heif_opener()
+pillow_heif.register_avif_opener()
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+# HF token setup
 HF_TOKEN = os.getenv("HF_TOKEN")
 if HF_TOKEN is None:
     raise ValueError("Please set the HF_TOKEN environment variable")
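
Note: the `try:` shown as context for the next hunk wraps a Hugging Face login step that falls outside the changed lines. A minimal sketch of that elided step, assuming the new `from huggingface_hub import login` import is used directly:

    try:
        login(token=HF_TOKEN)  # assumed form; the actual call is not shown in this diff
    except Exception as e:
        raise ValueError(f"Failed to login to Hugging Face: {str(e)}")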
@@ -39,40 +37,7 @@ try:
 except Exception as e:
     raise ValueError(f"Failed to login to Hugging Face: {str(e)}")
 
-# Fix FLUX pipeline initialization
-def initialize_pipeline():
-    try:
-        pipe = FluxPipeline.from_pretrained(
-            "black-forest-labs/FLUX.1-dev",
-            torch_dtype=torch.bfloat16,
-            use_auth_token=HF_TOKEN
-        )
-        pipe.load_lora_weights(
-            hf_hub_download(
-                "ByteDance/Hyper-SD",
-                "Hyper-FLUX.1-dev-8steps-lora.safetensors",
-                use_auth_token=HF_TOKEN
-            )
-        )
-        pipe.fuse_lora(lora_scale=0.125)
-        pipe.to(device="cuda", dtype=torch.bfloat16)
-        return pipe
-    except Exception as e:
-        raise ValueError(f"Failed to initialize pipeline: {str(e)}")
-# Pipeline initialization
-try:
-    pipe = initialize_pipeline()
-except Exception as e:
-    raise RuntimeError(f"Failed to setup the model: {str(e)}")
-
-BoundingBox = tuple[int, int, int, int]
-
-pillow_heif.register_heif_opener()
-pillow_heif.register_avif_opener()
-
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-# weird dance because ZeroGPU
+# Model initialization
 segmenter = BoxSegmenter(device="cpu")
 segmenter.device = device
 segmenter.model = segmenter.model.to(device=segmenter.device)
@@ -80,66 +45,25 @@ segmenter.model = segmenter.model.to(device=segmenter.device)
 gd_model_path = "IDEA-Research/grounding-dino-base"
 gd_processor = GroundingDinoProcessor.from_pretrained(gd_model_path)
 gd_model = GroundingDinoForObjectDetection.from_pretrained(gd_model_path, torch_dtype=torch.float32)
-gd_model = gd_model.to(device=device)  # type: ignore
+gd_model = gd_model.to(device=device)
 assert isinstance(gd_model, GroundingDinoForObjectDetection)
 
-# Added FLUX pipeline initialization code
-pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
-pipe.load_lora_weights(hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"))
+# FLUX pipeline initialization
+pipe = FluxPipeline.from_pretrained(
+    "black-forest-labs/FLUX.1-dev",
+    torch_dtype=torch.bfloat16,
+    use_auth_token=HF_TOKEN
+)
+pipe.load_lora_weights(
+    hf_hub_download(
+        "ByteDance/Hyper-SD",
+        "Hyper-FLUX.1-dev-8steps-lora.safetensors",
+        use_auth_token=HF_TOKEN
+    )
+)
 pipe.fuse_lora(lora_scale=0.125)
 pipe.to(device="cuda", dtype=torch.bfloat16)
 
-def generate_background(prompt: str, width: int, height: int) -> Image.Image:
-    """Generate the background image"""
-    try:
-        with timer("Background generation"):
-            image = pipe(
-                prompt=prompt,
-                width=width,
-                height=height,
-                num_inference_steps=8,
-                guidance_scale=4.0,
-            ).images[0]
-        return image
-    except Exception as e:
-        raise gr.Error(f"Background generation failed: {str(e)}")  # fixed closing parenthesis
-
-
-def combine_with_background(foreground: Image.Image, background: Image.Image) -> Image.Image:
-    """Composite the foreground over the background"""
-    background = background.resize(foreground.size)
-    return Image.alpha_composite(background.convert('RGBA'), foreground)
-
-def _process(
-    img: Image.Image,
-    prompt: str | BoundingBox | None,
-    bg_prompt: str | None,
-) -> tuple[tuple[Image.Image, Image.Image, Image.Image], gr.DownloadButton]:
-    try:
-        # Existing object-extraction logic
-        mask, bbox, time_log = _gpu_process(img, prompt)
-        masked_alpha = apply_mask(img, mask, defringe=True)
-
-        # Background generation and compositing
-        if bg_prompt:
-            background = generate_background(bg_prompt, img.width, img.height)
-            combined = combine_with_background(masked_alpha, background)
-        else:
-            combined = Image.alpha_composite(Image.new("RGBA", masked_alpha.size, "white"), masked_alpha)
-
-        # Save logic
-        thresholded = mask.point(lambda p: 255 if p > 10 else 0)
-        bbox = thresholded.getbbox()
-        to_dl = masked_alpha.crop(bbox)
-
-        temp = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
-        to_dl.save(temp, format="PNG")
-        temp.close()
-
-        return (img, combined, masked_alpha), gr.DownloadButton(value=temp.name, interactive=True)
-    except Exception as e:
-        raise gr.Error(f"Processing failed: {str(e)}")
-
 def bbox_union(bboxes: Sequence[list[int]]) -> BoundingBox | None:
     if not bboxes:
         return None
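
Note: the added pipeline setup still calls `hf_hub_download`, whose import (`from huggingface_hub import hf_hub_download`) is removed earlier in this diff, so that import presumably needs to remain. Also, `use_auth_token` is deprecated in recent diffusers/huggingface_hub releases in favor of `token`; an equivalent sketch, assuming current library versions:

    from huggingface_hub import hf_hub_download

    pipe = FluxPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-dev",
        torch_dtype=torch.bfloat16,
        token=HF_TOKEN,  # newer spelling of use_auth_token
    )
    pipe.load_lora_weights(
        hf_hub_download(
            "ByteDance/Hyper-SD",
            "Hyper-FLUX.1-dev-8steps-lora.safetensors",
            token=HF_TOKEN,
        )
    )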
@@ -153,18 +77,12 @@ def bbox_union(bboxes: Sequence[list[int]]) -> BoundingBox | None:
         max(bbox[3] for bbox in bboxes),
     )
 
-
 def corners_to_pixels_format(bboxes: torch.Tensor, width: int, height: int) -> torch.Tensor:
     x1, y1, x2, y2 = bboxes.round().to(torch.int32).unbind(-1)
     return torch.stack((x1.clamp_(0, width), y1.clamp_(0, height), x2.clamp_(0, width), y2.clamp_(0, height)), dim=-1)
 
-
 def gd_detect(img: Image.Image, prompt: str) -> BoundingBox | None:
-    assert isinstance(gd_processor, GroundingDinoProcessor)
-
-    # Grounding Dino expects a dot after each category.
     inputs = gd_processor(images=img, text=f"{prompt}.", return_tensors="pt").to(device=device)
-
     with no_grad():
         outputs = gd_model(**inputs)
     width, height = img.size
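
Note: `corners_to_pixels_format` rounds the detector's float corner boxes to integers and clamps them to the image bounds. A small self-contained check with illustrative values:

    import torch

    boxes = torch.tensor([[-3.2, 5.7, 830.4, 620.2]])  # hypothetical detection
    x1, y1, x2, y2 = boxes.round().to(torch.int32).unbind(-1)
    clamped = torch.stack(
        (x1.clamp_(0, 800), y1.clamp_(0, 600), x2.clamp_(0, 800), y2.clamp_(0, 600)),
        dim=-1,
    )
    assert clamped.tolist() == [[0, 6, 800, 600]]  # clipped to an 800x600 image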
@@ -174,41 +92,44 @@ def gd_detect(img: Image.Image, prompt: str) -> BoundingBox | None:
         target_sizes=[(height, width)],
     )[0]
     assert "boxes" in results and isinstance(results["boxes"], torch.Tensor)
-
     bboxes = corners_to_pixels_format(results["boxes"].cpu(), width, height)
     return bbox_union(bboxes.numpy().tolist())
 
-
-def apply_mask(
-    img: Image.Image,
-    mask_img: Image.Image,
-    defringe: bool = True,
-) -> Image.Image:
+def apply_mask(img: Image.Image, mask_img: Image.Image, defringe: bool = True) -> Image.Image:
     assert img.size == mask_img.size
     img = img.convert("RGB")
     mask_img = mask_img.convert("L")
-
     if defringe:
-        # Mitigate edge halo effects via color decontamination
         rgb, alpha = np.asarray(img) / 255.0, np.asarray(mask_img) / 255.0
         foreground = cast(np.ndarray[Any, np.dtype[np.uint8]], estimate_foreground_ml(rgb, alpha))
         img = Image.fromarray((foreground * 255).astype("uint8"))
-
     result = Image.new("RGBA", img.size)
     result.paste(img, (0, 0), mask_img)
     return result
 
+def generate_background(prompt: str, width: int, height: int) -> Image.Image:
+    """Generate the background image"""
+    try:
+        with timer("Background generation"):
+            image = pipe(
+                prompt=prompt,
+                width=width,
+                height=height,
+                num_inference_steps=8,
+                guidance_scale=4.0,
+            ).images[0]
+        return image
+    except Exception as e:
+        raise gr.Error(f"Background generation failed: {str(e)}")
 
+def combine_with_background(foreground: Image.Image, background: Image.Image) -> Image.Image:
+    """Composite the foreground over the background"""
+    background = background.resize(foreground.size)
+    return Image.alpha_composite(background.convert('RGBA'), foreground)
 
-@spaces.GPU
-def _gpu_process(
-    img: Image.Image,
-    prompt: str | BoundingBox | None,
-) -> tuple[Image.Image, BoundingBox | None, list[str]]:
-    # Because of ZeroGPU shenanigans, we need a *single* function with the
-    # `spaces.GPU` decorator that *does not* contain postprocessing.
-
+@spaces.GPU
+def _gpu_process(img: Image.Image, prompt: str | BoundingBox | None) -> tuple[Image.Image, BoundingBox | None, list[str]]:
     time_log: list[str] = []
-
     if isinstance(prompt, str):
         t0 = time.time()
         bbox = gd_detect(img, prompt)
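
Note: the added code relies on two names that this diff neither defines nor imports: `generate_background` uses `timer(...)`, and `_gpu_process` keeps `@spaces.GPU` even though `import spaces` is removed above. Both are presumably provided elsewhere in app.py; if `timer` is not, a minimal compatible sketch (an assumption, not the Space's actual helper) would be:

    import time
    from contextlib import contextmanager

    @contextmanager
    def timer(label: str):
        # Times the wrapped block, matching `with timer("Background generation"):`.
        t0 = time.time()
        try:
            yield
        finally:
            print(f"{label}: {time.time() - t0:.2f}s")

`@spaces.GPU` itself still requires `import spaces` at the top of the file.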
@@ -218,16 +139,40 @@ def _gpu_process(
         raise gr.Error("No object detected")
     else:
         bbox = prompt
-
     t0 = time.time()
     mask = segmenter(img, bbox)
     time_log.append(f"segment: {time.time() - t0}")
-
     return mask, bbox, time_log
 
+def _process(img: Image.Image, prompt: str | BoundingBox | None, bg_prompt: str | None = None) -> tuple[tuple[Image.Image, Image.Image, Image.Image], gr.DownloadButton]:
+    if img.width > 2048 or img.height > 2048:
+        orig_res = max(img.width, img.height)
+        img.thumbnail((2048, 2048))
+        if isinstance(prompt, tuple):
+            x0, y0, x1, y1 = (int(x * 2048 / orig_res) for x in prompt)
+            prompt = (x0, y0, x1, y1)
 
+    mask, bbox, time_log = _gpu_process(img, prompt)
+    masked_alpha = apply_mask(img, mask, defringe=True)
 
+    if bg_prompt:
+        try:
+            background = generate_background(bg_prompt, img.width, img.height)
+            combined = combine_with_background(masked_alpha, background)
+        except Exception as e:
+            raise gr.Error(f"Background processing failed: {str(e)}")
+    else:
+        combined = Image.alpha_composite(Image.new("RGBA", masked_alpha.size, "white"), masked_alpha)
+
+    thresholded = mask.point(lambda p: 255 if p > 10 else 0)
+    bbox = thresholded.getbbox()
+    to_dl = masked_alpha.crop(bbox)
+
+    temp = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
+    to_dl.save(temp, format="PNG")
+    temp.close()
 
+    return (img, combined, masked_alpha), gr.DownloadButton(value=temp.name, interactive=True)
 
 def process_bbox(prompts: dict[str, Any]) -> tuple[tuple[Image.Image, Image.Image], gr.DownloadButton]:
     assert isinstance(img := prompts["image"], Image.Image)
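
Note: the new `_process` caps inputs at 2048 px on the long side and rescales a bounding-box prompt by the same factor (PIL's `thumbnail` preserves aspect ratio). A worked example with illustrative numbers:

    # A 3000x1500 input is thumbnailed to 2048x1024, so box coordinates
    # shrink by 2048/3000.
    orig_res = 3000
    prompt = (300, 150, 1500, 900)                        # original pixels
    scaled = tuple(int(x * 2048 / orig_res) for x in prompt)
    assert scaled == (204, 102, 1024, 614)                # thumbnail pixels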
@@ -240,45 +185,42 @@ def process_bbox(prompts: dict[str, Any]) -> tuple[tuple[Image.Image, Image.Image], gr.DownloadButton]:
         bbox = None
     return _process(img, bbox)
 
-
 def on_change_bbox(prompts: dict[str, Any] | None):
     return gr.update(interactive=prompts is not None)
 
-def process_prompt(img: Image.Image, prompt: str) -> tuple[tuple[Image.Image, Image.Image], gr.DownloadButton]:
-    return _process(img, prompt)
-
-
-def on_change_prompt(img: Image.Image | None, prompt: str | None):
+def process_prompt(img: Image.Image, prompt: str, bg_prompt: str | None = None) -> tuple[tuple[Image.Image, Image.Image], gr.DownloadButton]:
+    return _process(img, prompt, bg_prompt)
+
+def on_change_prompt(img: Image.Image | None, prompt: str | None, bg_prompt: str | None = None):
     return gr.update(interactive=bool(img and prompt))
 
-
+# CSS style definitions
 css = """
-footer {
-    visibility: hidden;
+footer {display: none}
+.main-title {
+    text-align: center;
+    margin: 2em 0;
+}
+.main-title h1 {
+    color: #2196F3;
+    font-size: 2.5em;
+}
+.container {
+    max-width: 1200px;
+    margin: auto;
+    padding: 20px;
 }
 """
 
-# Added style definitions
-css = """
-footer {visibility: hidden}
-.container {max-width: 1200px; margin: auto; padding: 20px;}
-.main-title {text-align: center; color: #2a2a2a; margin-bottom: 2em;}
-.tabs {background: #f7f7f7; border-radius: 15px; padding: 20px;}
-.input-column {background: white; padding: 20px; border-radius: 10px; box-shadow: 0 2px 6px rgba(0,0,0,0.1);}
-.output-column {background: white; padding: 20px; border-radius: 10px; box-shadow: 0 2px 6px rgba(0,0,0,0.1);}
-.custom-button {background: #2196F3; color: white; border: none; border-radius: 5px; padding: 10px 20px;}
-.custom-button:hover {background: #1976D2;}
-.example-region {margin-top: 2em; padding: 20px; background: #f0f0f0; border-radius: 10px;}
-"""
-
-def process_prompt(img: Image.Image, prompt: str, bg_prompt: str = None) -> tuple[tuple[Image.Image, Image.Image], gr.DownloadButton]:
-    return _process(img, prompt, bg_prompt)
-
-def on_change_prompt(img: Image.Image | None, prompt: str | None, bg_prompt: str | None = None):
-    return gr.update(interactive=bool(img and prompt))
-
+# Gradio UI
 with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
+    gr.HTML("""
+        <div class="main-title">
+            <h1>🎨 Advanced Image Object Extractor</h1>
+            <p>Extract objects from images using text prompts or bounding boxes</p>
+        </div>
+    """)
+
     with gr.Tabs() as tabs:
         with gr.Tab("✨ Extract by Text", id="tab_prompt"):
            with gr.Row(equal_height=True):
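
Note: `process_prompt` and `on_change_prompt` now take an optional `bg_prompt`, so the event wiring (in an unchanged part of the file) must pass three inputs. A hedged sketch with hypothetical component names (`iimg`, `prompt`, `bg_prompt`, `btn` are illustrative, not the Space's actual variables):

    with gr.Blocks() as sketch:
        iimg = gr.Image(type="pil")
        prompt = gr.Textbox(label="Prompt")
        bg_prompt = gr.Textbox(label="Background prompt")
        btn = gr.Button("Extract", interactive=False)
        for comp in (iimg, prompt, bg_prompt):
            comp.change(fn=on_change_prompt, inputs=[iimg, prompt, bg_prompt], outputs=[btn])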
@@ -316,26 +258,8 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
 
             with gr.Accordion("📚 Examples", open=False):
                 examples = [
-                    {
-                        "image": "examples/text.jpg",
-                        "prompt": "text",
-                        "bg_prompt": "white background"
-                    },
-                    {
-                        "image": "examples/potted-plant.jpg",
-                        "prompt": "potted plant",
-                        "bg_prompt": "natural garden background"
-                    },
-                    {
-                        "image": "examples/chair.jpg",
-                        "prompt": "chair",
-                        "bg_prompt": "modern living room"
-                    },
-                    {
-                        "image": "examples/black-lamp.jpg",
-                        "prompt": "black lamp",
-                        "bg_prompt": "minimalist interior"
-                    }
+                    ["examples/text.jpg", "text", "white background"],
+                    ["examples/black-lamp.jpg", "black lamp", "minimalist interior"]
                 ]
                 ex = gr.Examples(
                     examples=examples,
@@ -345,7 +269,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
                     cache_examples=True
                 )
 
-        # Bounding Box tab
         with gr.Tab("📏 Extract by Box", id="tab_bb"):
             with gr.Row(equal_height=True):
                 with gr.Column(scale=1, min_width=400):
@@ -377,22 +300,8 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
 
             with gr.Accordion("📚 Examples", open=False):
                 examples_bb = [
-                    {
-                        "image": "examples/text.jpg",
-                        "boxes": [{"xmin": 51, "ymin": 511, "xmax": 639, "ymax": 1255}]
-                    },
-                    {
-                        "image": "examples/potted-plant.jpg",
-                        "boxes": [{"xmin": 51, "ymin": 511, "xmax": 639, "ymax": 1255}]
-                    },
-                    {
-                        "image": "examples/chair.jpg",
-                        "boxes": [{"xmin": 98, "ymin": 330, "xmax": 973, "ymax": 1468}]
-                    },
-                    {
-                        "image": "examples/black-lamp.jpg",
-                        "boxes": [{"xmin": 88, "ymin": 148, "xmax": 700, "ymax": 1414}]
-                    }
+                    ["examples/text.jpg", [{"xmin": 51, "ymin": 511, "xmax": 639, "ymax": 1255}]],
+                    ["examples/black-lamp.jpg", [{"xmin": 88, "ymin": 148, "xmax": 700, "ymax": 1414}]]
                 ]
                 ex_bb = gr.Examples(
                     examples=examples_bb,
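
Note: both example lists were converted from dicts to positional lists. `gr.Examples` expects each row to follow the order of its `inputs` components, and with `cache_examples=True` the outputs for every row are precomputed when the Space starts, so the example files must exist at build time. A minimal illustration of the expected shape (component names hypothetical):

    ex = gr.Examples(
        examples=[["examples/text.jpg", "text", "white background"]],
        inputs=[iimg, prompt, bg_prompt],  # row order must match these inputs
        outputs=[output_display],
        fn=process_prompt,
        cache_examples=True,               # runs fn on each row at startup
    )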
 