import os
import re

import gradio as gr
import numpy as np
import spaces
import torch
from diffusers import FluxInpaintPipeline
from PIL import Image

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

pipe = FluxInpaintPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
).to(device)


def sanitize_prompt(prompt):
    # Allow only alphanumeric characters, spaces, and basic punctuation
    allowed_chars = re.compile(r"[^a-zA-Z0-9\s.,!?-]")
    return allowed_chars.sub("", prompt)


@spaces.GPU(duration=180)
def process_images(image, image2=None, prompt="a girl",
                   inpaint_model="black-forest-labs/FLUX.1-schnell",
                   strength=0.75, seed=0, progress=gr.Progress(track_tqdm=True)):
    progress(0, desc="start-process-images")
    print("start-process-images")

    # The ImageEditor normally passes a dict with "background" and "layers";
    # fall back to the separately uploaded mask when a plain image arrives instead.
    if not isinstance(image, dict):
        if image2 is None:
            print("empty mask")
            return image, None
        image = {"background": image, "layers": [image2]}

    # Prefer the uploaded mask image; otherwise use the first painted layer.
    if image2 is not None:
        mask = image2
    else:
        if len(image["layers"]) == 0:
            print("empty mask")
            return image["background"], None
        print("use layer")
        mask = image["layers"][0]

    def process_image(image, mask_image, prompt="a person",
                      model_id="black-forest-labs/FLUX.1-schnell",
                      strength=0.75, seed=0, num_inference_steps=4):
        if image is None:
            return None
        generator = torch.Generator(device).manual_seed(seed)
        # For more parameters, see
        # https://huggingface.co/docs/diffusers/api/pipelines/flux#diffusers.FluxInpaintPipeline
        output = pipe(prompt=prompt, image=image, mask_image=mask_image,
                      generator=generator, strength=strength,
                      num_inference_steps=num_inference_steps)
        return output.images[0]

    # Strip characters outside the allowed set before passing the prompt to the pipeline.
    prompt = sanitize_prompt(prompt)
    output = process_image(image["background"], mask, prompt, inpaint_model, strength, seed)
    return output, mask


def read_file(path: str) -> str:
    with open(path, "r", encoding="utf-8") as f:
        return f.read()


def example_out(image, image_mask, prompt, strength, example_id):
    # The ImageEditor value is a complex dict, so the example output is simply
    # looked up by its id instead of being recomputed.
    return f"images/{example_id}.jpg"


css = """
#col-left {
    margin: 0 auto;
    max-width: 640px;
}
#col-right {
    margin: 0 auto;
    max-width: 640px;
}
"""

demo_blocks = gr.Blocks(css=css, elem_id="demo-container")
with demo_blocks as demo:
    with gr.Column():
        gr.HTML(read_file("demo_header.html"))
        with gr.Row():
            with gr.Column():
                image = gr.ImageEditor(height=800, sources=["upload", "clipboard"], transforms=[],
                                       image_mode="RGB", layers=False, elem_id="image_upload",
                                       type="pil", label="Upload",
                                       brush=gr.Brush(colors=["#fff"], color_mode="fixed"))
                with gr.Row(elem_id="prompt-container", equal_height=False):
                    with gr.Row():
                        prompt = gr.Textbox(label="Prompt", value="a eyes closed girl,shut eyes",
                                            placeholder="Your prompt (what you want in place of what is erased)",
                                            elem_id="prompt")
                        btn = gr.Button("Inpaint", elem_id="run_button", variant="primary")

                image_mask = gr.Image(sources=["upload", "clipboard"], elem_id="mask_upload",
                                      type="pil", label="Mask Upload", height=400, value=None)

                with gr.Accordion(label="Advanced Settings", open=False):
                    with gr.Row(equal_height=True):
                        strength = gr.Number(value=0.75, minimum=0, maximum=1.0, step=0.01,
                                             label="Inpaint strength")
                        seed = gr.Number(value=0, minimum=0, step=1, label="Inpaint seed")
                        models = ["black-forest-labs/FLUX.1-schnell"]
                        inpaint_model = gr.Dropdown(label="Model", choices=models,
                                                    value="black-forest-labs/FLUX.1-schnell")
                        id_input = gr.Text(label="Name", visible=False)

            with gr.Column():
                image_out = gr.Image(height=800, sources=[], label="Output",
                                     elem_id="output-img", format="webp")
                mask_out = gr.Image(height=800, sources=[], label="Mask",
                                    elem_id="mask-img", format="jpeg")

        btn.click(fn=process_images,
                  inputs=[image, image_mask, prompt, inpaint_model, strength, seed],
                  outputs=[image_out, mask_out],
                  api_name="infer")

        gr.Examples(
            examples=[
                ["images/00547245_99.jpg", "images/00547245_99_mask.jpg",
                 "a beautiful girl,eyes closed", 0.8, "images/00547245.jpg"],
                ["images/00538245_paint.jpg", "images/00538245_mask.jpg",
                 "a beautiful girl,wearing t-shirt", 0.7, "images/00538245.jpg"],
                ["images/00207245_18.jpg", "images/00207245_18_mask.jpg",
                 "a beautiful girl,mouth opened", 0.2, "images/00207245.jpg"],
            ],
            # fn=example_out,
            inputs=[image, image_mask, prompt, strength, image_out],
            # outputs=[test_out],
            # cache_examples=False,
        )

demo_blocks.queue(max_size=25).launch(share=False, debug=True)