import spaces
import torch
from diffusers import FluxInpaintPipeline
import gradio as gr
import re
from PIL import Image, ImageFilter
import os
import numpy as np

def convert_to_fit_size(original_width_and_height, maximum_size=2048):
    """Scale (width, height) down so neither side exceeds maximum_size, keeping the aspect ratio."""
    width, height = original_width_and_height
    if width <= maximum_size and height <= maximum_size:
        return width, height
    if width > height:
        scaling_factor = maximum_size / width
    else:
        scaling_factor = maximum_size / height
    new_width = int(width * scaling_factor)
    new_height = int(height * scaling_factor)
    return new_width, new_height
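
# For example (illustrative values):
#   convert_to_fit_size((4096, 2048)) -> (2048, 1024)
#   sizes already within the limit are returned unchanged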

def adjust_to_multiple_of_32(width: int, height: int):
    """Round both dimensions down to the nearest multiple of 32, as the pipeline expects."""
    width = width - (width % 32)
    height = height - (height % 32)
    return width, height
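
# For example: adjust_to_multiple_of_32(1000, 750) -> (992, 736)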

def mask_to_donut(mask, size):
    """Build a ring-shaped ("donut") mask by dilating the drawn mask and re-pasting the original strokes on top."""
    # MaxFilter requires an odd kernel size
    if size % 2 == 0:
        size += 1
    dilation_mask = mask.filter(ImageFilter.MaxFilter(size))
    white_img = Image.new('RGB', mask.size, (255, 255, 255))
    black_img = Image.new('RGB', mask.size, (0, 0, 0))
    # Black out the dilated region, then restore the original strokes,
    # leaving a band (the dilation ring) around them
    white_img.paste(black_img, (0, 0), dilation_mask.convert("L"))
    white_img.paste(mask, (0, 0), mask.convert("L"))
    return white_img
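
# A minimal way to preview the donut mask on its own, using one of the bundled
# example masks (illustrative, not part of the app flow):
#   m = Image.open("examples/normal_mouth_mask.jpg").convert("RGB")
#   mask_to_donut(m, 32).save("donut_preview.png")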

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = FluxInpaintPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype).to(device)
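
# If GPU memory is tight, diffusers' sequential CPU offload is one alternative to
# moving the whole pipeline onto the device (a sketch, assuming accelerate is installed):
#   pipe.enable_model_cpu_offload()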

def sanitize_prompt(prompt):
    # Allow only alphanumeric characters, whitespace, and basic punctuation
    allowed_chars = re.compile(r"[^a-zA-Z0-9\s.,!?-]")
    sanitized_prompt = allowed_chars.sub("", prompt)
    return sanitized_prompt
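
# For example: sanitize_prompt("a girl <script>") -> "a girl script"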

@spaces.GPU(duration=120)
def process_images(image, image2=None, prompt="a girl", inpaint_model="black-forest-labs/FLUX.1-schnell", strength=0.75, seed=0, donut_mask=True, donut_size=32, progress=gr.Progress(track_tqdm=True)):
    progress(0, desc="start-process-images")
    # The ImageEditor normally passes a dict with 'background' and 'layers';
    # fall back to that shape if a plain image arrives (e.g. via the API)
    if not isinstance(image, dict):
        if image2 is None:
            # No drawn layers and no uploaded mask: nothing to inpaint
            return image, None
        image = {'background': image, 'layers': [image2]}
    if image2 is not None:
        # An explicitly uploaded mask takes priority over drawn layers
        mask = image2
    else:
        if len(image['layers']) == 0:
            # Nothing drawn: return the background unchanged
            return image['background'], None
        mask = image['layers'][0]

    def process_inpaint(image, mask_image, prompt="a person", model_id="black-forest-labs/FLUX.1-schnell", strength=0.75, seed=0, num_inference_steps=4):
        if image is None:
            return None
        generator = torch.Generator(device).manual_seed(seed)
        # Shrink to the size limit, then round down to multiples of 32
        fit_width, fit_height = convert_to_fit_size(image.size)
        width, height = adjust_to_multiple_of_32(fit_width, fit_height)
        image = image.resize((width, height), Image.LANCZOS)
        mask_image = mask_image.resize((width, height), Image.NEAREST).convert("RGB")
        output = pipe(prompt=prompt, image=image, mask_image=mask_image, generator=generator, strength=strength,
                      width=width, height=height, guidance_scale=0,
                      num_inference_steps=num_inference_steps, max_sequence_length=256)
        return output.images[0], mask_image, image, fit_width, fit_height

    if donut_mask:
        # Keep the original strokes so the result can be pasted back through them
        original_mask = mask
        mask = mask_to_donut(mask, donut_size)

    output, mask_image, image_resized, fit_width, fit_height = process_inpaint(image["background"], mask, prompt, inpaint_model, strength, seed)

    if donut_mask:
        # Composite the generated pixels back only where the original mask was drawn
        mask = original_mask.resize(mask_image.size)
        image_resized.paste(output, (0, 0), mask.convert("L"))
        output = image_resized.resize((fit_width, fit_height), Image.LANCZOS)
        mask_image = mask.resize(output.size)
    else:
        output = output.resize((fit_width, fit_height), Image.LANCZOS)
        mask_image = mask_image.resize(output.size)
    return output, mask_image
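
# A minimal sketch of calling process_images outside the UI (paths taken from
# the bundled examples; assumes a GPU is available for the @spaces.GPU call):
#   bg = Image.open("examples/00538245.jpg")
#   m = Image.open("examples/normal_mouth_mask.jpg")
#   result, used_mask = process_images(bg, m, prompt="a beautiful girl,big-smile")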

def read_file(path: str) -> str:
    with open(path, 'r', encoding='utf-8') as f:
        content = f.read()
    return content
css="""
#col-left {
margin: 0 auto;
max-width: 640px;
}
#col-right {
margin: 0 auto;
max-width: 640px;
}
.grid-container {
display: flex;
align-items: center;
justify-content: center;
gap: 10px;
}
.image {
width: 128px;
height: 128px;
object-fit: cover;
}
.text {
font-size: 16px;
}
"""

with gr.Blocks(css=css, elem_id="demo-container") as demo:
    with gr.Column():
        gr.HTML(read_file("demo_header.html"))
        gr.HTML(read_file("demo_tools.html"))
        with gr.Row():
            with gr.Column():
                image = gr.ImageEditor(height=800, sources=['upload', 'clipboard'], transforms=[], image_mode='RGB',
                                       layers=False, elem_id="image_upload", type="pil", label="Upload",
                                       brush=gr.Brush(colors=["#fff"], color_mode="fixed"))
                with gr.Row(elem_id="prompt-container", equal_height=False):
                    prompt = gr.Textbox(label="Prompt", value="a person",
                                        placeholder="Your prompt (what you want in place of what is erased)",
                                        elem_id="prompt")
                with gr.Row(equal_height=True):
                    donut_mask = gr.Checkbox(label="Donut Mask", value=False,
                                             info="Usually improves the result, but is slower; see the second example")
                    donut_size = gr.Slider(label="Donut Size", minimum=1, maximum=64, step=1, value=32,
                                           info="Larger values are much slower")
                btn = gr.Button("Inpaint", elem_id="run_button", variant="primary")
                image_mask = gr.Image(sources=['upload', 'clipboard'], elem_id="mask_upload", type="pil",
                                      label="Mask Upload", height=400, value=None)
                with gr.Accordion(label="Advanced Settings", open=False):
                    with gr.Row(equal_height=True):
                        strength = gr.Number(value=0.75, minimum=0, maximum=1.0, step=0.01, label="Inpaint strength")
                        seed = gr.Number(value=0, minimum=0, step=1, label="Inpaint seed")
                        models = ["black-forest-labs/FLUX.1-schnell"]
                        inpaint_model = gr.Dropdown(label="Model", choices=models, value="black-forest-labs/FLUX.1-schnell")
                        id_input = gr.Text(label="Name", visible=False)
            with gr.Column():
                image_out = gr.Image(height=800, sources=[], label="Output", elem_id="output-img", format="webp")
                mask_out = gr.Image(height=800, sources=[], label="Mask", elem_id="mask-img", format="jpeg")
    btn.click(fn=process_images, inputs=[image, image_mask, prompt, inpaint_model, strength, seed, donut_mask, donut_size],
              outputs=[image_out, mask_out], api_name='infer')
    gr.Examples(
        examples=[
            ["examples/00538245.jpg", "examples/normal_mouth_mask.jpg", "a beautiful girl,big-smile", 0.75, "examples/normal_mouth_mask_result.jpg"],
            ["examples/00538245.jpg", "examples/expand_mouth_mask.jpg", "a beautiful girl,big-smile", 0.75, "examples/expand_mouth_mask_result.jpg"],
            ["examples/00547245_99.jpg", "examples/00547245_99_mask.jpg", "a beautiful girl,eyes closed", 0.75, "examples/00547245.jpg"],
            ["examples/00207245_18.jpg", "examples/00207245_18_mask.jpg", "a beautiful girl,mouth opened", 0.2, "examples/00207245.jpg"],
        ],
        inputs=[image, image_mask, prompt, strength, image_out],
    )
    gr.HTML(read_file("demo_footer.html"))
if __name__ == "__main__":
demo.launch()