Update app.py
app.py CHANGED
@@ -17,7 +17,7 @@ from compel import Compel
 from diffusers import EulerDiscreteScheduler
 
 # Device configuration
-device = torch.device("
+device = torch.device("cpu")  # Ensure everything is set to run on CPU
 
 # Constants for colors
 BG_COLOR = (0, 0, 0, 255)  # gray with full opacity
@@ -31,14 +31,14 @@ options = vision.ImageSegmenterOptions(base_options=base_options,
 # Initialize ControlNet inpainting pipeline
 controlnet = ControlNetModel.from_pretrained(
     'lllyasviel/control_v11p_sd15_inpaint',
-    torch_dtype=torch.
+    torch_dtype=torch.float32,  # Use float32 for CPU
 ).to(device)
 
 pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
     'runwayml/stable-diffusion-v1-5',
     safety_checker=None,
     controlnet=controlnet,
-    torch_dtype=torch.
+    torch_dtype=torch.float32,  # Use float32 for CPU
 ).to(device)
 
 # Set the K_EULER scheduler
@@ -79,7 +79,6 @@ def resize_image(image, max_size=1536):
         image = cv2.resize(image, new_size, interpolation=cv2.INTER_AREA)
     return image
 
-@spaces.GPU(duration=60)
 # Function to inpaint the hair area using ControlNet
 def inpaint_hair(image, prompt):
     # Only resize the input image if it's larger than 1536 in any dimension
@@ -101,20 +100,19 @@ def inpaint_hair(image, prompt):
     inpaint_condition = torch.from_numpy(np.expand_dims(image_np, 0).transpose(0, 3, 1, 2)).to(device)
 
     # Generate inpainted image
-    generator = torch.
+    generator = torch.manual_seed(42)
     negative_prompt = "lowres, bad quality, poor quality"
 
-
-
-
-
-
-
-
-
-
-
-    ).images[0]
+    output = pipe(
+        prompt=prompt,
+        negative_prompt=negative_prompt,
+        image=image_pil,
+        mask_image=mask_pil,
+        control_image=inpaint_condition,
+        num_inference_steps=25,
+        guidance_scale=7.5,
+        generator=generator
+    ).images[0]
 
     return np.array(output)
 
@@ -132,4 +130,4 @@ iface = gr.Interface(
 )
 
 if __name__ == "__main__":
-    iface.launch()
+    iface.launch()
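
The commit pins the whole pipeline to CPU and float32 and drops the @spaces.GPU(duration=60) decorator. As a minimal sketch, not part of this commit (which hard-codes CPU), the device and dtype could instead be chosen as a pair so the same app.py would also run if GPU hardware came back; the conditional below assumes the rest of the file stays as shown in the diff:

    import torch

    # Pick device and dtype together: float16 is only reliable on CUDA,
    # while CPU inference generally needs float32.
    if torch.cuda.is_available():
        device = torch.device("cuda")
        dtype = torch.float16
    else:
        device = torch.device("cpu")
        dtype = torch.float32

    # Both models should then be loaded with the same torch_dtype so the
    # pipeline's intermediate tensors match, e.g.:
    #   controlnet = ControlNetModel.from_pretrained(
    #       'lllyasviel/control_v11p_sd15_inpaint', torch_dtype=dtype).to(device)
    #   pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
    #       'runwayml/stable-diffusion-v1-5', safety_checker=None,
    #       controlnet=controlnet, torch_dtype=dtype).to(device)

Seeding with torch.manual_seed(42), as the diff does, returns the default (CPU) generator and keeps runs reproducible; on CUDA, torch.Generator(device=device).manual_seed(42) would be the matching choice.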