import gradio as gr
from PIL import Image
import base64
from io import BytesIO

import torch
#from diffusers import FluxControlNetModel
#from diffusers.pipelines import FluxControlNetPipeline

from diffusers import DiffusionPipeline

#from diffusers import FluxControlNetPipeline
#from diffusers import FluxControlNetModel #, FluxMultiControlNetModel

"""
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev")
pipe.load_lora_weights("enhanceaiteam/Flux-Uncensored-V2")

prompt = "nsfw nude woman on beach, sunset, long flowing hair, sensual pose"
image = pipe(prompt).images[0]
"""

#import torch.nn.functional as F
#import torchvision
#import torchvision.transforms as T
#import cv2

from diffusers import StableDiffusionInpaintPipeline

import numpy as np
import os
import shutil
from gradio_client import Client, handle_file

# Load the model once globally to avoid repeated loading
"""
def load_inpainting_model():
    # Load pipeline
    #model_path = "urpmv13Inpainting.safetensors"
    model_path = "uberRealisticPornMerge_v23Inpainting.safetensors"
    #model_path = "pornmasterFantasy_v4-inpainting.safetensors"
    #model_path = "pornmasterAmateur_v6Vae-inpainting.safetensors"
    device = "cpu"  # Explicitly use CPU
    pipe = StableDiffusionInpaintPipeline.from_single_file(
        model_path,
        torch_dtype=torch.float32,  # Use float32 for CPU
        safety_checker=None
    ).to(device)
    return pipe
"""
"""
# Load the model once globally to avoid repeated loading
def load_upscaling_model():
    # Load pipeline
    device = "cpu"  # Explicitly use CPU
    controlnet = FluxControlNetModel.from_pretrained(
        "jasperai/Flux.1-dev-Controlnet-Upscaler",
        torch_dtype=torch.float32
    )
    pipe = FluxControlNetPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-dev",
        controlnet=controlnet,
        torch_dtype=torch.float32
    ).to(device)
    pipe = DiffusionPipeline.from_pretrained("jasperai/Flux.1-dev-Controlnet-Upscaler")    
    return pipe
"""

# Preload the model once
#inpaint_pipeline = load_inpainting_model()
# Preload the model once
#upscale_pipeline = load_upscaling_model()

def encode_image(orig_image):
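    # Returns base64-encoded JPEG bytes (not a str); RGBA inputs would need .convert("RGB")
    # before saving as JPEG. Currently unused in this script.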
    buffered = BytesIO()
    orig_image.save(buffered, format="JPEG")
    img_str = base64.b64encode(buffered.getvalue())
    return img_str

def resize_image(input_image, mask_image):
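    # Resizes the mask to match the input image, then center-crops it to 80% of the
    # width (height scaled to keep the aspect ratio). Despite the name, the value
    # returned is the processed mask, not a resized copy of input_image.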
    orig_image = mask_image.resize(input_image.size, Image.Resampling.LANCZOS)    
    aspect_ratio = orig_image.height / orig_image.width
    old_width = orig_image.width
    new_width = int(orig_image.width*0.8)
    old_height = orig_image.height
    new_height = int(new_width * aspect_ratio)
    #resized_image = orig_image.resize((new_width, new_height), Image.Resampling.LANCZOS)

    left_crop = int((old_width - new_width)/2)
    right_crop = old_width - int((old_width - new_width) / 2)
    top_crop = int((old_height - new_height)/2)
    bottom_crop = old_height - int((old_height - new_height) / 2)
    cropped_image = orig_image.crop((left_crop, top_crop, right_crop, bottom_crop))
    return cropped_image

# Function to resize image (simpler interpolation method for speed)
def resize_to_match(input_image, output_image):
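    # Plain PIL resize of output_image back to input_image's dimensions; the
    # commented-out blocks below are earlier ControlNet / torch-based upscaling experiments.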

    #w, h = output_image.size
    #control_image = output_image.resize((w * 4, h * 4))
    """    
    scaled_image = pipe(
        prompt="", 
        control_image=control_image,
        controlnet_conditioning_scale=0.6,
        num_inference_steps=28, 
        guidance_scale=3.5,
        height=control_image.size[1],
        width=control_image.size[0]
    ).images[0]    
    """
    #return scaled_image
    
    #torch_img = pil_to_torch(input_image)
    #torch_img_scaled = F.interpolate(torch_img.unsqueeze(0),mode='trilinear').squeeze(0)
    #output_image = torchvision.transforms.functional.to_pil_image(torch_img_scaled, mode=None)
    
    return output_image.resize(input_image.size, Image.Resampling.BICUBIC)  # BICUBIC interpolation; consistent with the Resampling enum used above

def generate_image_old(image_path, mask_path, text_prompt="undress", negative_prompt=""):
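    # Legacy variant (fn_index=33): `client` is not defined in this scope and the
    # result is never returned, so this function is effectively dead code kept for reference.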
    result = client.predict(
				text_prompt,	# str in 'parameter_10' Textbox component
				negative_prompt,	# str in 'Negative Prompt' Textbox component
				["Fooocus V2","Fooocus Enhance","Fooocus Sharp"],	# List[str] in 'Selected Styles' Checkboxgroup component
				"Quality",	# str in 'Performance' Radio component
				'704×1408 <span style="color: grey;"> ∣ 1:2</span>',	# str in 'Aspect Ratios' Radio component
				1,	# int | float (numeric value between 1 and 32) in 'Image Number' Slider component
				"-1",	# str in 'Seed' Textbox component
				0,	# int | float (numeric value between 0.0 and 30.0) in 'Image Sharpness' Slider component
				1,	# int | float (numeric value between 1.0 and 30.0) in 'Guidance Scale' Slider component
				"juggernautXL_version6Rundiffusion.safetensors",	# str (Option from: ['ACertainty.ckpt', 'ACertainty.safetensors', 'juggernautXL_version6Rundiffusion.safetensors']) in 'Base Model (SDXL only)' Dropdown component
				"None",	# str (Option from: ['None', 'ACertainty.ckpt', 'ACertainty.safetensors', 'juggernautXL_version6Rundiffusion.safetensors']) 								in 'Refiner (SDXL or SD 1.5)' Dropdown component
				0.1,	# int | float (numeric value between 0.1 and 1.0)								in 'Refiner Switch At' Slider component
				"None",	# str (Option from: ['None', 'sdxl_lcm_lora.safetensors', 'sd_xl_offset_example-lora_1.0.safetensors'])								in 'LoRA 1' Dropdown component
				-2,	# int | float (numeric value between -2 and 2)								in 'Weight' Slider component
				"None",	# str (Option from: ['None', 'sdxl_lcm_lora.safetensors', 'sd_xl_offset_example-lora_1.0.safetensors'])								in 'LoRA 2' Dropdown component
				-2,	# int | float (numeric value between -2 and 2)								in 'Weight' Slider component
				"None",	# str (Option from: ['None', 'sdxl_lcm_lora.safetensors', 'sd_xl_offset_example-lora_1.0.safetensors'])								in 'LoRA 3' Dropdown component
				-2,	# int | float (numeric value between -2 and 2)								in 'Weight' Slider component
				"None",	# str (Option from: ['None', 'sdxl_lcm_lora.safetensors', 'sd_xl_offset_example-lora_1.0.safetensors'])								in 'LoRA 4' Dropdown component
				-2,	# int | float (numeric value between -2 and 2)								in 'Weight' Slider component
				"None",	# str (Option from: ['None', 'sdxl_lcm_lora.safetensors', 'sd_xl_offset_example-lora_1.0.safetensors'])								in 'LoRA 5' Dropdown component
				-2,	# int | float (numeric value between -2 and 2)								in 'Weight' Slider component
				True,	# bool in 'Input Image' Checkbox component
				"",	# str in 'parameter_85' Textbox component
				"Disabled",	# str in 'Upscale or Variation:' Radio component
				None,	# str (filepath or URL to image)								in 'Drag above image to here' Image component
				[],	# List[str] in 'Outpaint Direction' Checkboxgroup component

				image_path,	# str (filepath or URL to image)								in 'Drag inpaint or outpaint image to here' Image component
				"",	# str in 'Inpaint Additional Prompt' Textbox component
				mask_path,	# str (filepath or URL to image)	in 'Mask Upload' Image component

				image_path,	# str (filepath or URL to image)		in 'Image' Image component
				0,	# int | float (numeric value between 0.0 and 1.0)								in 'Stop At' Slider component
				0,	# int | float (numeric value between 0.0 and 2.0)								in 'Weight' Slider component
				"ImagePrompt",	# str in 'Type' Radio component
				None,	# str (filepath or URL to image)								in 'Image' Image component
				0,	# int | float (numeric value between 0.0 and 1.0)								in 'Stop At' Slider component
				0,	# int | float (numeric value between 0.0 and 2.0)								in 'Weight' Slider component
				"ImagePrompt",	# str in 'Type' Radio component
				None,	# str (filepath or URL to image)								in 'Image' Image component
				0,	# int | float (numeric value between 0.0 and 1.0)								in 'Stop At' Slider component
				0,	# int | float (numeric value between 0.0 and 2.0)								in 'Weight' Slider component
				"ImagePrompt",	# str in 'Type' Radio component
				None,	# str (filepath or URL to image)								in 'Image' Image component
				0,	# int | float (numeric value between 0.0 and 1.0)								in 'Stop At' Slider component
				0,	# int | float (numeric value between 0.0 and 2.0)								in 'Weight' Slider component
				"ImagePrompt",	# str in 'Type' Radio component

				fn_index=33
    )


def generate_image(image_path, mask_path, text_prompt="undress", negative_prompt=""):
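    # Sends one positional argument per input of the remote Fooocus app's fn_index=67
    # endpoint; the inline comments describe the component each value maps to.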
    client = Client("https://fooocus-ui.emcdn.ru/")
    result = client.predict(
				False,	# bool in 'Generate Image Grid for Each Batch' Checkbox component
				text_prompt,	# str in 'parameter_12' Textbox component
				negative_prompt,	# str in 'Negative Prompt' Textbox component
				["Fooocus V2"],	# List[str] in 'Selected Styles' Checkboxgroup component from: ["Fooocus V2","Fooocus Enhance","Fooocus Sharp"]
				"Quality",	# str in 'Performance' Radio component
				'704×1408 <span style="color: grey;"> ∣ 1:2</span>',	# str in 'Aspect Ratios' Radio component
				1,	# int | float (numeric value between 1 and 32) in 'Image Number' Slider component
				"png",	# str in 'Output Format' Radio component
				"-1",	# str in 'Seed' Textbox component
				True,	# bool in 'Read wildcards in order' Checkbox component
				0,	# int | float (numeric value between 0.0 and 30.0) in 'Image Sharpness' Slider component
				1,	# int | float (numeric value between 1.0 and 30.0) in 'Guidance Scale' Slider component
				"juggernautXL_version8Rundiffusion.safetensors",	# str (Option from: ['animaPencilXL_v500.safetensors', 'juggernautXL_v8Rundiffusion.safetensors', 'playground-v2.5-1024px-aesthetic.fp16.safetensors', 'ponyDiffusionV6XL.safetensors', 'realisticStockPhoto_v20.safetensors', 'sd_xl_base_1.0_0.9vae.safetensors', 'sd_xl_refiner_1.0_0.9vae.safetensors']) in 'Base Model (SDXL only)' Dropdown component
				"None",	# str (Option from: ['None', 'animaPencilXL_v500.safetensors', 'juggernautXL_v8Rundiffusion.safetensors', 'playground-v2.5-1024px-aesthetic.fp16.safetensors', 'ponyDiffusionV6XL.safetensors', 'realisticStockPhoto_v20.safetensors', 'sd_xl_base_1.0_0.9vae.safetensors', 'sd_xl_refiner_1.0_0.9vae.safetensors']) in 'Refiner (SDXL or SD 1.5)' Dropdown component
				0.1,	# int | float (numeric value between 0.1 and 1.0) in 'Refiner Switch At' Slider component
				True,	# bool in 'Enable' Checkbox component
				"None",	# str (Option from: ['None', 'sd_xl_offset_example-lora_1.0.safetensors', 'SDXL_FILM_PHOTOGRAPHY_STYLE_V1.safetensors', 'sdxl_hyper_sd_4step_lora.safetensors', 'sdxl_lcm_lora.safetensors', 'sdxl_lightning_4step_lora.safetensors']) in 'LoRA 1' Dropdown component
				-2,	# int | float (numeric value between -2 and 2) in 'Weight' Slider component
				True,	# bool in 'Enable' Checkbox component
				"None",	# str (Option from: ['None', 'sd_xl_offset_example-lora_1.0.safetensors', 'SDXL_FILM_PHOTOGRAPHY_STYLE_V1.safetensors', 'sdxl_hyper_sd_4step_lora.safetensors', 'sdxl_lcm_lora.safetensors', 'sdxl_lightning_4step_lora.safetensors']) in 'LoRA 2' Dropdown component
				-2,	# int | float (numeric value between -2 and 2) in 'Weight' Slider component
				True,	# bool in 'Enable' Checkbox component
				"None",	# str (Option from: ['None', 'sd_xl_offset_example-lora_1.0.safetensors', 'SDXL_FILM_PHOTOGRAPHY_STYLE_V1.safetensors', 'sdxl_hyper_sd_4step_lora.safetensors', 'sdxl_lcm_lora.safetensors', 'sdxl_lightning_4step_lora.safetensors'])	in 'LoRA 3' Dropdown component
				-2,	# int | float (numeric value between -2 and 2)	in 'Weight' Slider component
				True,	# bool in 'Enable' Checkbox component
				"None",	# str (Option from: ['None', 'sd_xl_offset_example-lora_1.0.safetensors', 'SDXL_FILM_PHOTOGRAPHY_STYLE_V1.safetensors', 'sdxl_hyper_sd_4step_lora.safetensors', 'sdxl_lcm_lora.safetensors', 'sdxl_lightning_4step_lora.safetensors']) in 'LoRA 4' Dropdown component
				-2,	# int | float (numeric value between -2 and 2) in 'Weight' Slider component
				True,	# bool in 'Enable' Checkbox component
				"None",	# str (Option from: ['None', 'sd_xl_offset_example-lora_1.0.safetensors', 'SDXL_FILM_PHOTOGRAPHY_STYLE_V1.safetensors', 'sdxl_hyper_sd_4step_lora.safetensors', 'sdxl_lcm_lora.safetensors', 'sdxl_lightning_4step_lora.safetensors']) in 'LoRA 5' Dropdown component
				-2,	# int | float (numeric value between -2 and 2) in 'Weight' Slider component
				True,	# bool in 'Input Image' Checkbox component
				"-1",	# str in 'parameter_212' Textbox component
				"Disabled",	# str in 'Upscale or Variation:' Radio component
				"https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png",	# str (filepath or URL to image) in 'Image' Image component
				["Left"],	# List[str] in 'Outpaint Direction' Checkboxgroup component
				"https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png",	# str (filepath or URL to image) in 'Image' Image component
				"",	# str in 'Inpaint Additional Prompt' Textbox component
				"https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png",	# str (filepath or URL to image) in 'Mask Upload' Image component
				True,	# bool in 'Disable Preview' Checkbox component
				True,	# bool in 'Disable Intermediate Results' Checkbox component
				True,	# bool in 'Disable seed increment' Checkbox component
				False,	# bool in 'Black Out NSFW' Checkbox component
				0.1,	# int | float (numeric value between 0.1 and 3.0) in 'Positive ADM Guidance Scaler' Slider component
				0.1,	# int | float (numeric value between 0.1 and 3.0) in 'Negative ADM Guidance Scaler' Slider component
				0,	# int | float (numeric value between 0.0 and 1.0) in 'ADM Guidance End At Step' Slider component
				1,	# int | float (numeric value between 1.0 and 30.0) in 'CFG Mimicking from TSNR' Slider component
				1,	# int | float (numeric value between 1 and 12) in 'CLIP Skip' Slider component
				"euler",	# str (Option from: ['euler', 'euler_ancestral', 'heun', 'heunpp2', 'dpm_2', 'dpm_2_ancestral', 'lms', 'dpm_fast', 'dpm_adaptive', 'dpmpp_2s_ancestral', 'dpmpp_sde', 'dpmpp_sde_gpu', 'dpmpp_2m', 'dpmpp_2m_sde', 'dpmpp_2m_sde_gpu', 'dpmpp_3m_sde', 'dpmpp_3m_sde_gpu', 'ddpm', 'lcm', 'tcd', 'restart', 'ddim', 'uni_pc', 'uni_pc_bh2']) in 'Sampler' Dropdown component
				"normal",	# str (Option from: ['normal', 'karras', 'exponential', 'sgm_uniform', 'simple', 'ddim_uniform', 'lcm', 'turbo', 'align_your_steps', 'tcd', 'edm_playground_v2.5'])	in 'Scheduler' Dropdown component
				"Default (model)",	# str (Option from: ['Default (model)', 'ponyDiffusionV6XL_vae.safetensors']) in 'VAE' Dropdown component
				-1,	# int | float (numeric value between -1 and 200)  in 'Forced Overwrite of Sampling Step' Slider component
				-1,	# int | float (numeric value between -1 and 200)  in 'Forced Overwrite of Refiner Switch Step' Slider component
				-1,	# int | float (numeric value between -1 and 2048) in 'Forced Overwrite of Generating Width' Slider component
				-1,	# int | float (numeric value between -1 and 2048) in 'Forced Overwrite of Generating Height' Slider component
				-1,	# int | float (numeric value between -1 and 1.0)  in 'Forced Overwrite of Denoising Strength of "Vary"' Slider component
				-1,	# int | float (numeric value between -1 and 1.0)  in 'Forced Overwrite of Denoising Strength of "Upscale"' Slider component
				True,	# bool in 'Mixing Image Prompt and Vary/Upscale' Checkbox component
				True,	# bool in 'Mixing Image Prompt and Inpaint' Checkbox component
				True,	# bool in 'Debug Preprocessors' Checkbox component
				True,	# bool in 'Skip Preprocessors' Checkbox component
				1,	# int | float (numeric value between 1 and 255)	in 'Canny Low Threshold' Slider component
				1,	# int | float (numeric value between 1 and 255)	in 'Canny High Threshold' Slider component
				"joint",	# str (Option from: ['joint', 'separate', 'vae']) in 'Refiner swap method' Dropdown component
				0,	# int | float (numeric value between 0.0 and 1.0) in 'Softness of ControlNet' Slider component
				True,	# bool in 'Enabled' Checkbox component
				0,	# int | float (numeric value between 0 and 2) in 'B1' Slider component
				0,	# int | float (numeric value between 0 and 2) in 'B2' Slider component
				0,	# int | float (numeric value between 0 and 4) in 'S1' Slider component
				0,	# int | float (numeric value between 0 and 4) in 'S2' Slider component
				True,	# bool in 'Debug Inpaint Preprocessing' Checkbox component
				True,	# bool in 'Disable initial latent in inpaint' Checkbox component
				"None",	# str (Option from: ['None', 'v1', 'v2.5', 'v2.6']) in 'Inpaint Engine' Dropdown component
				0,	# int | float (numeric value between 0.0 and 1.0) in 'Inpaint Denoising Strength' Slider component
				0,	# int | float (numeric value between 0.0 and 1.0) in 'Inpaint Respective Field' Slider component
				True,	# bool in 'Enable Advanced Masking Features' Checkbox component
				True,	# bool in 'Invert Mask When Generating' Checkbox component
				-64,	# int | float (numeric value between -64 and 64) in 'Mask Erode or Dilate' Slider component
				True,	# bool in 'Save only final enhanced image' Checkbox component
				True,	# bool in 'Save Metadata to Images' Checkbox component
				"fooocus",	# str in 'Metadata Scheme' Radio component
				"https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png",	# str (filepath or URL to image) in 'Image' Image component
				0,	# int | float (numeric value between 0.0 and 1.0) in 'Stop At' Slider component
				0,	# int | float (numeric value between 0.0 and 2.0) in 'Weight' Slider component
				"ImagePrompt",	# str in 'Type' Radio component
				"https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png",	# str (filepath or URL to image) in 'Image' Image component
				0,	# int | float (numeric value between 0.0 and 1.0) in 'Stop At' Slider component
				0,	# int | float (numeric value between 0.0 and 2.0) in 'Weight' Slider component
				"ImagePrompt",	# str in 'Type' Radio component
				"https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png",	# str (filepath or URL to image) in 'Image' Image component
				0,	# int | float (numeric value between 0.0 and 1.0) in 'Stop At' Slider component
				0,	# int | float (numeric value between 0.0 and 2.0) in 'Weight' Slider component
				"ImagePrompt",	# str in 'Type' Radio component
				"https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png",	# str (filepath or URL to image) in 'Image' Image component
				0,	# int | float (numeric value between 0.0 and 1.0) in 'Stop At' Slider component
				0,	# int | float (numeric value between 0.0 and 2.0) in 'Weight' Slider component
				"ImagePrompt",	# str in 'Type' Radio component
				True,	# bool in 'Debug GroundingDINO' Checkbox component
				-64,	# int | float (numeric value between -64 and 64) in 'GroundingDINO Box Erode or Dilate' Slider component
				True,	# bool in 'Debug Enhance Masks' Checkbox component
				"https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png",	# str (filepath or URL to image) in 'Use with Enhance, skips image generation' Image component
				True,	# bool in 'Enhance' Checkbox component
				"Disabled",	# str in 'Upscale or Variation:' Radio component
				"Before First Enhancement",	# str in 'Order of Processing' Radio component
				"Original Prompts",	# str in 'Prompt' Radio component
				True,	# bool in 'Enable' Checkbox component
				"",	# str in 'Detection prompt' Textbox component
				"",	# str in 'Enhancement positive prompt' Textbox component
				"",	# str in 'Enhancement negative prompt' Textbox component
				"u2net",	# str (Option from: ['u2net', 'u2netp', 'u2net_human_seg', 'u2net_cloth_seg', 'silueta', 'isnet-general-use', 'isnet-anime', 'sam']) in 'Mask generation model' Dropdown component
				"full",	# str (Option from: ['full', 'upper', 'lower']) in 'Cloth category' Dropdown component
				"vit_b",	# str (Option from: ['vit_b', 'vit_l', 'vit_h']) in 'SAM model' Dropdown component
				0,	# int | float (numeric value between 0.0 and 1.0) in 'Text Threshold' Slider component
				0,	# int | float (numeric value between 0.0 and 1.0) in 'Box Threshold' Slider component
				0,	# int | float (numeric value between 0 and 10) in 'Maximum number of detections' Slider component
				True,	# bool in 'Disable initial latent in inpaint' Checkbox component
				"None",	# str (Option from: ['None', 'v1', 'v2.5', 'v2.6']) in 'Inpaint Engine' Dropdown component
				0,	# int | float (numeric value between 0.0 and 1.0) in 'Inpaint Denoising Strength' Slider component
				0,	# int | float (numeric value between 0.0 and 1.0) in 'Inpaint Respective Field' Slider component
				-64,	# int | float (numeric value between -64 and 64) in 'Mask Erode or Dilate' Slider component
				True,	# bool in 'Invert Mask' Checkbox component
				True,	# bool in 'Enable' Checkbox component
				"",	# str in 'Detection prompt' Textbox component
				"",	# str in 'Enhancement positive prompt' Textbox component
				"",	# str in 'Enhancement negative prompt' Textbox component
				"u2net",	# str (Option from: ['u2net', 'u2netp', 'u2net_human_seg', 'u2net_cloth_seg', 'silueta', 'isnet-general-use', 'isnet-anime', 'sam']) in 'Mask generation model' Dropdown component
				"full",	# str (Option from: ['full', 'upper', 'lower']) in 'Cloth category' Dropdown component
				"vit_b",	# str (Option from: ['vit_b', 'vit_l', 'vit_h']) in 'SAM model' Dropdown component
				0,	# int | float (numeric value between 0.0 and 1.0) in 'Text Threshold' Slider component
				0,	# int | float (numeric value between 0.0 and 1.0) in 'Box Threshold' Slider component
				0,	# int | float (numeric value between 0 and 10) in 'Maximum number of detections' Slider component
				True,	# bool in 'Disable initial latent in inpaint' Checkbox component
				"None",	# str (Option from: ['None', 'v1', 'v2.5', 'v2.6']) in 'Inpaint Engine' Dropdown component
				0,	# int | float (numeric value between 0.0 and 1.0) in 'Inpaint Denoising Strength' Slider component
				0,	# int | float (numeric value between 0.0 and 1.0) in 'Inpaint Respective Field' Slider component
				-64,	# int | float (numeric value between -64 and 64) in 'Mask Erode or Dilate' Slider component
				True,	# bool in 'Invert Mask' Checkbox component
				True,	# bool in 'Enable' Checkbox component
				"",	# str in 'Detection prompt' Textbox component
				"",	# str in 'Enhancement positive prompt' Textbox component
				"",	# str in 'Enhancement negative prompt' Textbox component
				"u2net",	# str (Option from: ['u2net', 'u2netp', 'u2net_human_seg', 'u2net_cloth_seg', 'silueta', 'isnet-general-use', 'isnet-anime', 'sam']) in 'Mask generation model' Dropdown component
				"full",	# str (Option from: ['full', 'upper', 'lower'])	in 'Cloth category' Dropdown component
				"vit_b",	# str (Option from: ['vit_b', 'vit_l', 'vit_h']) in 'SAM model' Dropdown component
				0,	# int | float (numeric value between 0.0 and 1.0) in 'Text Threshold' Slider component
				0,	# int | float (numeric value between 0.0 and 1.0) in 'Box Threshold' Slider component
				0,	# int | float (numeric value between 0 and 10) in 'Maximum number of detections' Slider component
				True,	# bool in 'Disable initial latent in inpaint' Checkbox component
				"None",	# str (Option from: ['None', 'v1', 'v2.5', 'v2.6']) in 'Inpaint Engine' Dropdown component
				0,	# int | float (numeric value between 0.0 and 1.0) in 'Inpaint Denoising Strength' Slider component
				0,	# int | float (numeric value between 0.0 and 1.0) in 'Inpaint Respective Field' Slider component
				-64,	# int | float (numeric value between -64 and 64) in 'Mask Erode or Dilate' Slider component
				True,	# bool in 'Invert Mask' Checkbox component
				fn_index=67
    )
    print(result)
    return result

# Function to generate the mask using the SkalskiP/florence-sam-masking Space via gradio_client
def generate_mask(image_path, text_prompt="clothing"):
    client_sam = Client("SkalskiP/florence-sam-masking")
    mask_result = client_sam.predict(
        #mode_dropdown = "open vocabulary detection + image masks",
        image_input=handle_file(image_path),  # Provide your image path here
        text_input=text_prompt,  # Use "clothing" as the prompt
        api_name="/process_image"
    )
    print("mask_result=", mask_result)
    return mask_result  # This is the local path to the generated mask
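# Example usage (the path mirrors the temporary file used in process_image below):
#   mask_path = generate_mask("temp_input_image.png", text_prompt="clothing")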

# Save the generated mask
def save_mask(mask_local_path, save_path="generated_mask.png"):
    try:
        shutil.copy(mask_local_path, save_path)
    except Exception as e:
        print(f"Failed to save the mask: {e}")

# Function to perform inpainting
"""
def inpaint_image(input_image, mask_image):
    prompt = "undress, naked, real skin, detailed nipples, erect nipples, detailed pussy, (detailed nipples), (detailed skin), (detailed pussy), accurate anatomy"
    negative_prompt = "bad anatomy, deformed, ugly, disfigured, (extra arms), (extra legs), (extra hands), (extra feet), (extra finger)"
    
    #IMAGE_SIZE = (1024,1024)
    #initial_input_image = input_image.resize(IMAGE_SIZE)
    #initial_mask_image = mask_image.resize(IMAGE_SIZE)
    #blurred_mask_image = inpaint_pipeline.mask_processor.blur(initial_mask_image,blur_factor=10)
    #result = inpaint_pipeline(prompt=prompt, negative_prompt=negative_prompt, height=IMAGE_SIZE[0], width=IMAGE_SIZE[0], image=initial_input_image, mask_image=blurred_mask_image, padding_mask_crop=32)
    
    #blurred_mask_image = inpaint_pipeline.mask_processor.blur(mask_image,blur_factor=10)
    result = inpaint_pipeline(prompt=prompt, negative_prompt=negative_prompt, image=input_image, mask_image=mask_image, padding_mask_crop=10)
    inpainted_image = result.images[0]
    #inpainted_image = resize_to_match(input_image, inpainted_image)
    return inpainted_image
"""

# Function to process input image and mask
def process_image(input_image):
    # Save the input image temporarily so it can be uploaded to the remote masking Space
    input_image_path = "temp_input_image.png"
    input_image.save(input_image_path)

    # Generate the mask using Florence SAM API
    mask_local_path = generate_mask(image_path=input_image_path)
    #mask_local_path1 = str(mask_local_path)#[0])
    
    # Save the generated mask
    mask_image_path = "generated_mask.png"
    save_mask(mask_local_path, save_path=mask_image_path)

    # Open the mask image and perform inpainting
    mask_image = Image.open(mask_image_path)
    
    result_image = resize_image(input_image, mask_image)
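    # Note: the flow currently returns this cropped mask directly; the Fooocus
    # inpainting call in generate_image() is not invoked here.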

    # Clean up temporary files
    os.remove(input_image_path)
    os.remove(mask_image_path)

    return result_image

# Define Gradio interface using Blocks API
with gr.Blocks() as demo:
    with gr.Row():
        input_image = gr.Image(label="Upload Input Image", type="pil")
        output_image = gr.Image(type="pil", label="Output Image")

    # Button to trigger the process
    with gr.Row():
        btn = gr.Button("Run Inpainting")

    # Function to run when button is clicked
    btn.click(fn=process_image, inputs=[input_image], outputs=output_image)

# Launch the Gradio app (share=True also creates a temporary public link)
demo.launch(share=True)