LinKadel committed on
Commit 9dda282 (1 parent: e12a929)

Upload 3 files

ArmorSuit_v1.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a16b4cf713537c007b8cf1dc44e13e2730503b3a822760cbf46f9d7cb8fb5a0b
+ size 151112956
Cyberspace_background_composer.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a4cc2f5cb421a4859f96a8e228d37cad7ef4854c87e9602ccb83cbe435652d5
+ size 75613462
app.py CHANGED
@@ -1,7 +1,189 @@
  import gradio as gr

- def greet(name):
-     return "Hello " + name + "!!"

- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
- iface.launch()
+ import torch
  import gradio as gr
+ from gradio import processing_utils, utils
+ from PIL import Image
+ import random
+ from diffusers import (
+     DiffusionPipeline,
+     AutoencoderKL,
+     StableDiffusionControlNetPipeline,
+     ControlNetModel,
+     StableDiffusionLatentUpscalePipeline,
+     StableDiffusionImg2ImgPipeline,
+     StableDiffusionControlNetImg2ImgPipeline,
+     DPMSolverMultistepScheduler,  # <-- Added import
+     EulerDiscreteScheduler  # <-- Added import
+ )
+
+ import time
+ from share_btn import community_icon_html, loading_icon_html, share_js
+ import user_history
+ from illusion_style import css
+
+ BASE_MODEL = "SG161222/Realistic_Vision_V5.1_noVAE"
+
+ # Initialize both pipelines
+ vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)
+ #init_pipe = DiffusionPipeline.from_pretrained("SG161222/Realistic_Vision_V5.1_noVAE", torch_dtype=torch.float16)
+ controlnet = ControlNetModel.from_pretrained("monster-labs/control_v1p_sd15_qrcode_monster", torch_dtype=torch.float16)
+ main_pipe = StableDiffusionControlNetPipeline.from_pretrained(
+     BASE_MODEL,
+     controlnet=controlnet,
+     vae=vae,
+     safety_checker=None,
+     torch_dtype=torch.float16,
+ ).to("cuda")
+
+ #main_pipe.unet = torch.compile(main_pipe.unet, mode="reduce-overhead", fullgraph=True)
+ #main_pipe.unet.to(memory_format=torch.channels_last)
+ #main_pipe.unet = torch.compile(main_pipe.unet, mode="reduce-overhead", fullgraph=True)
+ #model_id = "stabilityai/sd-x2-latent-upscaler"
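+ # Build the img2img ControlNet pipeline from the same components, so both passes share the loaded weights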
+ image_pipe = StableDiffusionControlNetImg2ImgPipeline(**main_pipe.components)
+
+ #image_pipe.unet = torch.compile(image_pipe.unet, mode="reduce-overhead", fullgraph=True)
+ #upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
+ #upscaler.to("cuda")
+
+
+ # Map UI sampler names to diffusers scheduler constructors
+ SAMPLER_MAP = {
+     "DPM++ Karras SDE": lambda config: DPMSolverMultistepScheduler.from_config(config, use_karras_sigmas=True, algorithm_type="sde-dpmsolver++"),
+     "Euler": lambda config: EulerDiscreteScheduler.from_config(config),
+ }
+
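+ # Center-crop the input image to a square and resize it (512x512 by default)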
+ def center_crop_resize(img, output_size=(512, 512)):
+     width, height = img.size
+
+     # Calculate dimensions to crop to the center
+     new_dimension = min(width, height)
+     left = (width - new_dimension)/2
+     top = (height - new_dimension)/2
+     right = (width + new_dimension)/2
+     bottom = (height + new_dimension)/2
+
+     # Crop and resize
+     img = img.crop((left, top, right, bottom))
+     img = img.resize(output_size)
+
+     return img
+
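+ # Resize a latent batch (B, C, H, W) by interpolation, optionally center-cropping to the target aspect ratio first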
+ def common_upscale(samples, width, height, upscale_method, crop=False):
+     if crop == "center":
+         old_width = samples.shape[3]
+         old_height = samples.shape[2]
+         old_aspect = old_width / old_height
+         new_aspect = width / height
+         x = 0
+         y = 0
+         if old_aspect > new_aspect:
+             x = round((old_width - old_width * (new_aspect / old_aspect)) / 2)
+         elif old_aspect < new_aspect:
+             y = round((old_height - old_height * (old_aspect / new_aspect)) / 2)
+         s = samples[:,:,y:old_height-y,x:old_width-x]
+     else:
+         s = samples
+
+     return torch.nn.functional.interpolate(s, size=(height, width), mode=upscale_method)
+
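+ # Scale the latents produced by the first pass by a constant factor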
+ def upscale(samples, upscale_method, scale_by):
+     #s = samples.copy()
+     width = round(samples["images"].shape[3] * scale_by)
+     height = round(samples["images"].shape[2] * scale_by)
+     s = common_upscale(samples["images"], width, height, upscale_method, "disabled")
+     return s
+
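+ # Validate UI inputs; gr.Error surfaces the message in the interface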
+ def check_inputs(prompt: str, control_image: Image.Image):
+     if control_image is None:
+         raise gr.Error("Please select or upload an Input Illusion")
+     if prompt is None or prompt == "":
+         raise gr.Error("Prompt is required")
+
+ def convert_to_pil(base64_image):
+     pil_image = processing_utils.decode_base64_to_image(base64_image)
+     return pil_image
+
+ def convert_to_base64(pil_image):
+     base64_image = processing_utils.encode_pil_to_base64(pil_image)
+     return base64_image
+
+ # Inference function: two-pass generation (512x512 base latents -> 2x upscale -> img2img refinement)
+ def inference(
+     control_image: Image.Image,
+     prompt: str,
+     negative_prompt: str,
+     guidance_scale: float = 8.0,
+     controlnet_conditioning_scale: float = 1,
+     control_guidance_start: float = 1,
+     control_guidance_end: float = 1,
+     upscaler_strength: float = 0.5,
+     seed: int = -1,
+     sampler = "DPM++ Karras SDE",
+     progress = gr.Progress(track_tqdm=True),
+     profile: gr.OAuthProfile | None = None,
+ ):
+     start_time = time.time()
+     start_time_struct = time.localtime(start_time)
+     start_time_formatted = time.strftime("%H:%M:%S", start_time_struct)
+     print(f"Inference started at {start_time_formatted}")
+
+     # Generate the initial image
+     #init_image = init_pipe(prompt).images[0]
+
+     # Prepare control images for the base pass (512x512) and the refinement pass (1024x1024)
+     control_image_small = center_crop_resize(control_image)
+     control_image_large = center_crop_resize(control_image, (1024, 1024))
+
+     main_pipe.scheduler = SAMPLER_MAP[sampler](main_pipe.scheduler.config)
+     my_seed = random.randint(0, 2**32 - 1) if seed == -1 else seed
+     generator = torch.Generator(device="cuda").manual_seed(my_seed)
+
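+     # First pass: generate 512x512 latents guided by the illusion ControlNet image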
+     out = main_pipe(
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         image=control_image_small,
+         guidance_scale=float(guidance_scale),
+         controlnet_conditioning_scale=float(controlnet_conditioning_scale),
+         generator=generator,
+         control_guidance_start=float(control_guidance_start),
+         control_guidance_end=float(control_guidance_end),
+         num_inference_steps=15,
+         output_type="latent"
+     )
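+     # Second pass: upscale the latents 2x and refine at 1024x1024 with ControlNet img2img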
+     upscaled_latents = upscale(out, "nearest-exact", 2)
+     out_image = image_pipe(
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         control_image=control_image_large,
+         image=upscaled_latents,
+         guidance_scale=float(guidance_scale),
+         generator=generator,
+         num_inference_steps=20,
+         strength=upscaler_strength,
+         control_guidance_start=float(control_guidance_start),
+         control_guidance_end=float(control_guidance_end),
+         controlnet_conditioning_scale=float(controlnet_conditioning_scale)
+     )
+     end_time = time.time()
+     end_time_struct = time.localtime(end_time)
+     end_time_formatted = time.strftime("%H:%M:%S", end_time_struct)
+     print(f"Inference ended at {end_time_formatted}, taking {end_time-start_time}s")
+
+     # Save image + metadata
+     user_history.save_image(
+         label=prompt,
+         image=out_image["images"][0],
+         profile=profile,
+         metadata={
+             "prompt": prompt,
+             "negative_prompt": negative_prompt,
+             "guidance_scale": guidance_scale,
+             "controlnet_conditioning_scale": controlnet_conditioning_scale,
+             "control_guidance_start": control_guidance_start,
+             "control_guidance_end": control_guidance_end,
+             "upscaler_strength": upscaler_strength,
+             "seed": seed,
+             "sampler": sampler,
+         },
+     )
+
+     return out_image["images"][0], gr.update(visible=True), gr.update(visible=True), my_seed