Update app.py
app.py CHANGED
@@ -5,14 +5,14 @@ import numpy as np
 import gradio as gr
 import spaces
 import torch
-from PIL import Image
+from PIL import Image
 from diffusers import FluxInpaintPipeline
+from io import BytesIO
 
 MARKDOWN = """
 # FLUX.1 Inpainting 🔥
-
-
-for taking it to the next level by enabling inpainting with the FLUX.
+
+Shoutout to [Black Forest Labs](https://huggingface.co/black-forest-labs) for creating this model and [Gothos](https://github.com/Gothos) for adding inpainting support to FLUX.
 """
 
 MAX_SEED = np.iinfo(np.int32).max
@@ -20,8 +20,19 @@ IMAGE_SIZE = 1024
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 
 
+def load_image(url: str) -> Image.Image:
+    try:
+        response = requests.get(url, stream=True)
+        response.raise_for_status()
+        return Image.open(BytesIO(response.content)).convert("RGBA")
+    except requests.exceptions.RequestException as e:
+        print(f"Error loading image from {url}: {e}")
+        return None
+
+
 def remove_background(image: Image.Image, threshold: int = 50) -> Image.Image:
-
+    if not image:
+        return None
     data = image.getdata()
     new_data = []
     for item in data:
@@ -30,28 +41,10 @@ def remove_background(image: Image.Image, threshold: int = 50) -> Image.Image:
             new_data.append((0, 0, 0, 0))
         else:
             new_data.append(item)
-
     image.putdata(new_data)
     return image
 
 
-def load_image(url: str) -> Image.Image:
-    try:
-        response = requests.get(url, stream=True)
-        response.raise_for_status()  # Raise an HTTPError for bad responses
-        image = Image.open(BytesIO(response.content))
-        return image
-    except requests.HTTPError as http_err:
-        print(f"HTTP error occurred: {http_err}")
-        return None
-    except UnidentifiedImageError:
-        print("Cannot identify image file")
-        return None
-    except Exception as err:
-        print(f"Other error occurred: {err}")
-        return None
-
-
 EXAMPLES = [
     [
         {
@@ -114,16 +107,19 @@ def process(
     progress=gr.Progress(track_tqdm=True)
 ):
     if not input_text:
-
+        gr.Info("Please enter a text prompt.")
+        return None, None
 
     image = input_image_editor.get('background')
-    mask = input_image_editor.get('layers'
+    mask = input_image_editor.get('layers')[0] if input_image_editor.get('layers') else None
 
     if not image:
-
+        gr.Info("Please upload an image.")
+        return None, None
 
     if not mask:
-
+        gr.Info("Please draw a mask on the image.")
+        return None, None
 
     width, height = resize_image_dimensions(original_resolution_wh=image.size)
     resized_image = image.resize((width, height), Image.LANCZOS)
@@ -142,8 +138,8 @@
         generator=generator,
         num_inference_steps=num_inference_steps_slider
     ).images[0]
-
-    return result, resized_mask
+    print('INFERENCE DONE')
+    return result, resized_mask
 
 
 with gr.Blocks() as demo:
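
In summary, this commit moves the URL loader above remove_background, simplifies its error handling to a single requests.exceptions.RequestException branch, guards remove_background against a None input, and makes process bail out early with a gr.Info(...) message when the prompt, image, or mask is missing. The sketch below is not part of the commit; it only mirrors those patterns outside Gradio so they can be exercised in isolation, and the fake_editor_value dict is an assumed stand-in for the {'background': ..., 'layers': [...]} value that the image editor component passes to process.

# Illustrative sketch only -- mirrors the patterns introduced by this commit,
# not the Space's actual runtime. The editor dict below is an assumed stand-in
# for the value the Gradio image editor passes to process().
from io import BytesIO

import requests
from PIL import Image


def load_image(url: str) -> Image.Image:
    # Fetch an image over HTTP and return it as RGBA, or None on any request failure.
    try:
        response = requests.get(url, stream=True)
        response.raise_for_status()
        return Image.open(BytesIO(response.content)).convert("RGBA")
    except requests.exceptions.RequestException as e:
        print(f"Error loading image from {url}: {e}")
        return None


def extract_background_and_mask(editor_value: dict):
    # Same pattern as the updated process(): take the base image and the first
    # drawn layer, tolerating a missing or empty 'layers' list.
    image = editor_value.get('background')
    layers = editor_value.get('layers')
    mask = layers[0] if layers else None
    return image, mask


if __name__ == "__main__":
    # Hypothetical editor payload: a background image but no drawn layers yet.
    fake_editor_value = {
        'background': Image.new('RGB', (64, 64), 'white'),
        'layers': [],
    }
    image, mask = extract_background_and_mask(fake_editor_value)
    print(image is not None, mask is None)  # True True -> process() would ask for a mask

One trade-off worth noting: the new handler only catches request-level errors, so a response that is not a valid image would now raise from Image.open rather than returning None as the old UnidentifiedImageError branch did.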