dvir-bria committed on
Commit
6c4e5fc
1 Parent(s): 0068ac1

Create app.py

Files changed (1)
  1. app.py +110 -0
app.py ADDED
@@ -0,0 +1,110 @@
+ from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, EulerAncestralDiscreteScheduler
+ from PIL import Image
+ import torch
+ import numpy as np
+ import cv2
+ import gradio as gr
+ from torchvision import transforms
+
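+ # Canny edge ControlNet for BRIA 2.2, loaded in fp16 to cut VRAM use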
+ controlnet = ControlNetModel.from_pretrained(
+     "briaai/BRIA-2.2-ControlNet-Canny",
+     torch_dtype=torch.float16
+ ).to('cuda')
+
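+ # BRIA 2.2 backbone (SDXL architecture) with the ControlNet attached;
+ # low_cpu_mem_usage / offload_state_dict limit peak host RAM while the weights load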
+ pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
+     "briaai/BRIA-2.2",
+     controlnet=controlnet,
+     torch_dtype=torch.float16,
+     device_map='auto',
+     low_cpu_mem_usage=True,
+     offload_state_dict=True,
+ ).to('cuda')
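+ # Euler Ancestral sampler with the scaled-linear beta schedule standard for SDXL-family models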
+ pipe.scheduler = EulerAncestralDiscreteScheduler(
+     beta_start=0.00085,
+     beta_end=0.012,
+     beta_schedule="scaled_linear",
+     num_train_timesteps=1000,
+     steps_offset=1
+ )
+ # pipe.enable_freeu(b1=1.1, b2=1.1, s1=0.5, s2=0.7)
+ pipe.enable_xformers_memory_efficient_attention()
+ # unlike stock SDXL, BRIA 2.2 was not trained with zeroed-out empty-prompt embeddings
+ pipe.force_zeros_for_empty_prompt = False
+
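+ # Square-crop to the shorter side, then resize to the 1024x1024 resolution the SDXL backbone expects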
+ def resize_image(image):
+     image = image.convert('RGB')
+     current_size = image.size
+     if current_size[0] > current_size[1]:
+         center_cropped_image = transforms.functional.center_crop(image, (current_size[1], current_size[1]))
+     else:
+         center_cropped_image = transforms.functional.center_crop(image, (current_size[0], current_size[0]))
+     resized_image = transforms.functional.resize(center_cropped_image, (1024, 1024))
+     return resized_image
+
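+ # Canny edge map: the conditioning input a Canny ControlNet expects
+ # (the depth-estimator code this replaces belonged to a depth demo and left
+ # `canny_image` undefined; the thresholds below are common defaults, not tuned values)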
+ def get_canny_filter(image):
+     low_threshold = 100
+     high_threshold = 200
+     image = cv2.Canny(np.array(image), low_threshold, high_threshold)
+     image = image[:, :, None]
+     image = np.concatenate([image, image, image], axis=2)  # single channel -> 3 identical RGB channels
+     return Image.fromarray(image)
+
+
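+ # End-to-end generation: crop/resize the input, extract edges, then run the ControlNet-guided pipeline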
+ def process(input_image, prompt, negative_prompt, num_steps, controlnet_conditioning_scale, seed):
+     generator = torch.manual_seed(seed)
+
+     # center-crop and resize input_image to 1024x1024
+     input_image = resize_image(input_image)
+
+     canny_image = get_canny_filter(input_image)
+
+     images = pipe(
+         prompt, negative_prompt=negative_prompt, image=canny_image,
+         num_inference_steps=num_steps,
+         controlnet_conditioning_scale=float(controlnet_conditioning_scale),
+         generator=generator,
+     ).images
+
+     return [canny_image, images[0]]
+
+ block = gr.Blocks().queue()
+
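+ # Gradio layout: prompt controls on the left; the gallery on the right shows the edge map next to the generated image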
+ with block:
+     gr.Markdown("## BRIA 2.2 ControlNet Canny")
+     gr.HTML('''
+         <p style="margin-bottom: 10px; font-size: 94%">
+         This is a demo for ControlNet Canny that uses the
+         <a href="https://huggingface.co/briaai/BRIA-2.2" target="_blank">BRIA 2.2 text-to-image model</a> as its backbone.
+         Trained on licensed data, BRIA 2.2 provides full legal liability coverage for copyright and privacy infringement.
+         </p>
+     ''')
+     with gr.Row():
+         with gr.Column():
+             input_image = gr.Image(sources=None, type="pil")  # None enables upload, ctrl+v and webcam
+             prompt = gr.Textbox(label="Prompt")
+             negative_prompt = gr.Textbox(label="Negative prompt", value="Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers")
+             num_steps = gr.Slider(label="Number of steps", minimum=25, maximum=100, value=50, step=1)
+             controlnet_conditioning_scale = gr.Slider(label="ControlNet conditioning scale", minimum=0.1, maximum=2.0, value=1.0, step=0.05)
+             seed = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True)
+             run_button = gr.Button(value="Run")
+
+         with gr.Column():
+             result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery", columns=[2], height='auto')
+     ips = [input_image, prompt, negative_prompt, num_steps, controlnet_conditioning_scale, seed]
+     run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
+
+ block.launch(debug=True)