import spaces
import torch
from diffusers import FluxInpaintPipeline
import gradio as gr
import re
from PIL import Image
import os
import numpy as np
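
# Shrink (width, height) so that neither side exceeds maximum_size while preserving the
# aspect ratio; images already within the limit are returned unchanged.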
def convert_to_fit_size(original_width_and_height, maximum_size=2048):
    width, height = original_width_and_height
    if width <= maximum_size and height <= maximum_size:
        return width, height

    if width > height:
        scaling_factor = maximum_size / width
    else:
        scaling_factor = maximum_size / height

    new_width = int(width * scaling_factor)
    new_height = int(height * scaling_factor)
    return new_width, new_height
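
# Round both dimensions down to the nearest multiple of 32 so the resized image and mask
# divide cleanly into the pipeline's latent grid.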
def adjust_to_multiple_of_32(width: int, height: int):
    width = width - (width % 32)
    height = height - (height % 32)
    return width, height
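
# Load the FLUX.1-schnell inpainting pipeline once at startup so every request reuses the
# same weights; fall back to CPU when no GPU is available.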
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = FluxInpaintPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype).to(device)

def sanitize_prompt(prompt):
    # Allow only alphanumeric characters, spaces, and basic punctuation
    allowed_chars = re.compile(r"[^a-zA-Z0-9\s.,!?-]")
    sanitized_prompt = allowed_chars.sub("", prompt)
    return sanitized_prompt
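
# Main Gradio callback. The @spaces.GPU decorator requests a ZeroGPU slot for up to 120
# seconds per call. The function accepts either an ImageEditor dict (background plus
# painted mask layers) or a plain image with a separate mask image, then hands the actual
# inpainting off to the nested process_inpaint helper.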
@spaces.GPU(duration=120)
def process_examples(image, image2=None, prompt="a girl", inpaint_model="black-forest-labs/FLUX.1-schnell", strength=0.75, seed=0, progress=gr.Progress(track_tqdm=True)):
    # Not sure when this happens
    progress(0, desc="start-process-examples")
    print("start-process-examples")
    if not isinstance(image, dict):
        if image2 is None:
            print("empty mask")
            return image, None
        else:
            # Wrap a plain image and mask in the same dict layout the ImageEditor produces
            image = {'background': image, 'layers': [image2]}
    if image2 is not None:
        # print("use image2")
        mask = image2
    else:
        if len(image['layers']) == 0:
            print("empty mask")
            return image['background'], None
        print("use layer")
        mask = image['layers'][0]
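
    # Inner helper that performs the actual diffusion call: fit the image and mask to a
    # pipeline-friendly size, then run FLUX.1-schnell inpainting with guidance disabled
    # and a small number of steps, as the distilled schnell model expects.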
    def process_inpaint(image, mask_image, prompt="a person", model_id="black-forest-labs/FLUX.1-schnell", strength=0.75, seed=0, num_inference_steps=4):
        if image is None:
            return None
        generator = torch.Generator(device).manual_seed(seed)
        width, height = convert_to_fit_size(image.size)
        print(f"fit {width}x{height}")
        width, height = adjust_to_multiple_of_32(width, height)
        print(f"multiple {width}x{height}")
        image = image.resize((width, height), Image.LANCZOS)
        mask_image = mask_image.resize((width, height), Image.NEAREST)
        output = pipe(prompt=prompt, image=image, mask_image=mask_image, generator=generator, strength=strength, width=width, height=height,
                      guidance_scale=0, num_inference_steps=num_inference_steps, max_sequence_length=256)
        return output.images[0]

    output = process_inpaint(image["background"], mask, prompt, inpaint_model, strength, seed)
    return output, mask
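
# Read a local HTML/text snippet (header, tools, footer) so it can be inlined into the page.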
def read_file(path: str) -> str:
    with open(path, 'r', encoding='utf-8') as f:
        content = f.read()
    return content

css = """
#col-left {
    margin: 0 auto;
    max-width: 640px;
}
#col-right {
    margin: 0 auto;
    max-width: 640px;
}
.grid-container {
    display: flex;
    align-items: center;
    justify-content: center;
    gap: 10px;
}
.image {
    width: 128px;
    height: 128px;
    object-fit: cover;
}
.text {
    font-size: 16px;
}
"""
with gr.Blocks(css=css, elem_id="demo-container") as demo:
    with gr.Column():
        gr.HTML(read_file("demo_header.html"))
        gr.HTML(read_file("tools.html"))
        with gr.Row():
            with gr.Column():
                image = gr.ImageEditor(height=800, sources=['upload', 'clipboard'], transforms=[], image_mode='RGB', layers=False, elem_id="image_upload", type="pil", label="Upload", brush=gr.Brush(colors=["#fff"], color_mode="fixed"))
                with gr.Row(elem_id="prompt-container", equal_height=False):
                    with gr.Row():
                        prompt = gr.Textbox(label="Prompt", value="a person", placeholder="Your prompt (what you want in place of what is erased)", elem_id="prompt")
                        btn = gr.Button("Inpaint", elem_id="run_button", variant="primary")
                image_mask = gr.Image(sources=['upload', 'clipboard'], elem_id="mask_upload", type="pil", label="Mask_Upload", height=400, value=None)
                with gr.Accordion(label="Advanced Settings", open=False):
                    with gr.Row(equal_height=True):
                        strength = gr.Number(value=0.75, minimum=0, maximum=1.0, step=0.01, label="Inpaint strength")
                        seed = gr.Number(value=0, minimum=0, step=1, label="Inpaint seed")
                        models = ["black-forest-labs/FLUX.1-schnell"]
                        inpaint_model = gr.Dropdown(label="Model", choices=models, value="black-forest-labs/FLUX.1-schnell")
                        id_input = gr.Text(label="Name", visible=False)
            with gr.Column():
                image_out = gr.Image(height=800, sources=[], label="Output", elem_id="output-img", format="webp")
                mask_out = gr.Image(height=800, sources=[], label="Mask", elem_id="mask-img", format="jpeg")
        btn.click(fn=process_examples, inputs=[image, image_mask, prompt, inpaint_model, strength, seed], outputs=[image_out, mask_out], api_name='infer')
        gr.Examples(
            examples=[
                ["examples/00547245_99.jpg", "examples/00547245_99_mask.jpg", "a beautiful girl,eyes closed", 0.8, "examples/00547245.jpg"],
                ["examples/00538245_paint.jpg", "examples/00538245_mask.jpg", "a beautiful girl,wearing t-shirt", 0.7, "examples/00538245.jpg"],
                ["examples/00207245_18.jpg", "examples/00207245_18_mask.jpg", "a beautiful girl,mouth opened", 0.2, "examples/00207245.jpg"],
            ],
            # fn=example_out,
            inputs=[image, image_mask, prompt, strength, image_out],
            # outputs=[test_out],
            # cache_examples=False,
        )
        gr.HTML(read_file("demo_footer.html"))

demo.launch()