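"""Gradio tab for Stable Diffusion guided by an ADE20K segmentation map via ControlNet."""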
import gradio as gr
import numpy as np
import torch
from diffusers import (ControlNetModel, StableDiffusionControlNetPipeline,
                       UniPCMultistepScheduler)
from PIL import Image
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
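
# Base Stable Diffusion checkpoints offered in the model dropdown.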
stable_model_list = [
    "runwayml/stable-diffusion-v1-5",
    "stabilityai/stable-diffusion-2",
    "stabilityai/stable-diffusion-2-base",
    "stabilityai/stable-diffusion-2-1",
    "stabilityai/stable-diffusion-2-1-base",
]
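
# Inpainting checkpoints, defined alongside the other model lists; not used by this tab.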
stable_inpiant_model_list = [
    "stabilityai/stable-diffusion-2-inpainting",
    "runwayml/stable-diffusion-inpainting",
]
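
# Default example prompts shown in the textbox.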
stable_prompt_list = [
    "a photo of a man.",
    "a photo of a girl.",
]
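
# Default example negative prompts.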
stable_negative_prompt_list = [
    "bad, ugly",
    "deformed",
]
def ade_palette():
    """ADE20K palette that maps each class to RGB values."""
    return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
            [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
            [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
            [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
            [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
            [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
            [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
            [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
            [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
            [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
            [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
            [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
            [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
            [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
            [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
            [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
            [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
            [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
            [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
            [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
            [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
            [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
            [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
            [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
            [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
            [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
            [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
            [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
            [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
            [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
            [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
            [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
            [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
            [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
            [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
            [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
            [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
            [102, 255, 0], [92, 0, 255]]
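
# Build an ADE20K segmentation map from the input image and load the matching seg ControlNet.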
def controlnet_seg(image_path: str):
    # Run UperNet semantic segmentation on the input image.
    image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-small")
    image_segmentor = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-small")

    image = Image.open(image_path).convert('RGB')
    pixel_values = image_processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = image_segmentor(pixel_values)

    seg = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]

    # Colorize the label map with the ADE20K palette to build the ControlNet conditioning image.
    color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
    palette = np.array(ade_palette())
    for label, color in enumerate(palette):
        color_seg[seg == label, :] = color

    color_seg = color_seg.astype(np.uint8)
    image = Image.fromarray(color_seg)

    controlnet = ControlNetModel.from_pretrained(
        "fusing/stable-diffusion-v1-5-controlnet-seg", torch_dtype=torch.float16
    )
    return controlnet, image
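
# Generate an image with Stable Diffusion, guided by the segmentation map through ControlNet.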
def stable_diffusion_controlnet_seg(
    image_path: str,
    model_path: str,
    prompt: str,
    negative_prompt: str,
    guidance_scale: float,
    num_inference_step: int,
):
    controlnet, image = controlnet_seg(image_path=image_path)

    pipe = StableDiffusionControlNetPipeline.from_pretrained(
        pretrained_model_name_or_path=model_path,
        controlnet=controlnet,
        safety_checker=None,
        torch_dtype=torch.float16,
    )

    pipe.to("cuda")
    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
    pipe.enable_xformers_memory_efficient_attention()

    output = pipe(
        prompt=prompt,
        image=image,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_step,
        guidance_scale=guidance_scale,
    ).images

    return output[0]
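
# Gradio UI for the segmentation tab; returns its components so an outer app can wire the click handler.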
def stable_diffusion_controlnet_seg_app():
    with gr.Tab('Segmentation'):
        controlnet_seg_image_file = gr.Image(
            type='filepath',
            label='Image'
        )

        controlnet_seg_model_id = gr.Dropdown(
            choices=stable_model_list,
            value=stable_model_list[0],
            label='Stable Model Id'
        )

        controlnet_seg_prompt = gr.Textbox(
            lines=1,
            value=stable_prompt_list[0],
            label='Prompt'
        )

        controlnet_seg_negative_prompt = gr.Textbox(
            lines=1,
            value=stable_negative_prompt_list[0],
            label='Negative Prompt'
        )

        with gr.Accordion("Advanced Options", open=False):
            controlnet_seg_guidance_scale = gr.Slider(
                minimum=0.1,
                maximum=15,
                step=0.1,
                value=7.5,
                label='Guidance Scale'
            )

            controlnet_seg_num_inference_step = gr.Slider(
                minimum=1,
                maximum=100,
                step=1,
                value=50,
                label='Num Inference Step'
            )

        controlnet_seg_predict = gr.Button(value='Generate')

    variables = {
        'image_path': controlnet_seg_image_file,
        'model_path': controlnet_seg_model_id,
        'prompt': controlnet_seg_prompt,
        'negative_prompt': controlnet_seg_negative_prompt,
        'guidance_scale': controlnet_seg_guidance_scale,
        'num_inference_step': controlnet_seg_num_inference_step,
        'predict': controlnet_seg_predict,
    }
    return variables
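
# Minimal usage sketch (an assumption: the actual wiring normally lives in a separate
# app.py of this Space; the output component below is hypothetical):
#
#     import gradio as gr
#
#     with gr.Blocks() as demo:
#         ui = stable_diffusion_controlnet_seg_app()
#         output_image = gr.Image(label='Output')  # hypothetical output slot
#         ui['predict'].click(
#             fn=stable_diffusion_controlnet_seg,
#             inputs=[ui['image_path'], ui['model_path'], ui['prompt'],
#                     ui['negative_prompt'], ui['guidance_scale'],
#                     ui['num_inference_step']],
#             outputs=output_image,
#         )
#     demo.launch()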