SunderAli17 committed
Commit • 8a7c697 • 1 parent: f05464b
Create functions/app_with_diffusers.py

functions/app_with_diffusers.py (ADDED, +125 -0)
from huggingface_hub import hf_hub_download

hf_hub_download(repo_id="SunderAli17/SAKBIR", filename="models/adapter.pt", local_dir=".")
hf_hub_download(repo_id="SunderAli17/SAKBIR", filename="models/aggregator.pt", local_dir=".")
hf_hub_download(repo_id="SunderAli17/SAKBIR", filename="models/previewer_lora_weights.bin", local_dir=".")

import numpy as np  # needed by resize_img when pad_to_max_side is enabled
import torch
from PIL import Image

from diffusers import DDPMScheduler
from pipeline.lcm_single_step_scheduler import LCMSingleStepScheduler

from module.ip_adapter.utils import load_adapter_to_pipe
from pipelines.sdxl_SAKBIR import SAKBIRPipeline


def resize_img(input_image, max_side=1280, min_side=1024, size=None,
               pad_to_max_side=False, mode=Image.BILINEAR, base_pixel_number=64):

    w, h = input_image.size
    if size is not None:
        w_resize_new, h_resize_new = size
    else:
        # ratio = min_side / min(h, w)
        # w, h = round(ratio*w), round(ratio*h)
        ratio = max_side / max(h, w)
        input_image = input_image.resize([round(ratio*w), round(ratio*h)], mode)
        w_resize_new = (round(ratio * w) // base_pixel_number) * base_pixel_number
        h_resize_new = (round(ratio * h) // base_pixel_number) * base_pixel_number
    input_image = input_image.resize([w_resize_new, h_resize_new], mode)

    if pad_to_max_side:
        res = np.ones([max_side, max_side, 3], dtype=np.uint8) * 255
        offset_x = (max_side - w_resize_new) // 2
        offset_y = (max_side - h_resize_new) // 2
        res[offset_y:offset_y+h_resize_new, offset_x:offset_x+w_resize_new] = np.array(input_image)
        input_image = Image.fromarray(res)
    return input_image


# prepare models under ./models
instantir_path = './models'

# load pretrained models (SAKBIRPipeline is the pipeline class imported above)
pipe = SAKBIRPipeline.from_pretrained('stabilityai/stable-diffusion-xl-base-1.0', torch_dtype=torch.float16)

# load adapter
load_adapter_to_pipe(
    pipe,
    f"{instantir_path}/adapter.pt",
    image_encoder_or_path='facebook/dinov2-large',
)

# load previewer lora
pipe.prepare_previewers(instantir_path)
pipe.scheduler = DDPMScheduler.from_pretrained('stabilityai/stable-diffusion-xl-base-1.0', subfolder="scheduler")
lcm_scheduler = LCMSingleStepScheduler.from_config(pipe.scheduler.config)

# load aggregator weights
pretrained_state_dict = torch.load(f"{instantir_path}/aggregator.pt")
pipe.aggregator.load_state_dict(pretrained_state_dict)

# send to GPU and fp16
pipe.to(device='cuda', dtype=torch.float16)
pipe.aggregator.to(device='cuda', dtype=torch.float16)

PROMPT = "Photorealistic, highly detailed, hyper detailed photo - realistic maximum detail, 32k, \
ultra HD, extreme meticulous detailing, skin pore detailing, \
hyper sharpness, perfect without deformations, \
taken using a Canon EOS R camera, Cinematic, High Contrast, Color Grading. "

NEG_PROMPT = "blurry, out of focus, unclear, depth of field, over-smooth, \
sketch, oil painting, cartoon, CG Style, 3D render, unreal engine, \
dirty, messy, worst quality, low quality, frames, painting, illustration, drawing, art, \
watermark, signature, jpeg artifacts, deformed, lowres"


def infer(prompt, input_image, steps=30, cfg_scale=7.0, guidance_end=1.0,
          creative_restoration=False, seed=3407, height=1024, width=1024):

    # load a broken image
    low_quality_image = Image.open(input_image).convert("RGB")

    lq = [resize_img(low_quality_image, size=(width, height))]
    generator = torch.Generator(device='cuda').manual_seed(seed)
    timesteps = [
        i * (1000//steps) + pipe.scheduler.config.steps_offset for i in range(0, steps)
    ]
    timesteps = timesteps[::-1]

    prompt = PROMPT if len(prompt)==0 else prompt
    neg_prompt = NEG_PROMPT

    # InstantIR restoration
    image = pipe(
        prompt=[prompt]*len(lq),
        image=lq,
        num_inference_steps=steps,
        generator=generator,
        timesteps=timesteps,
        negative_prompt=[neg_prompt]*len(lq),
        guidance_scale=cfg_scale,
        previewer_scheduler=lcm_scheduler,
    ).images[0]

    return image


import gradio as gr


with gr.Blocks() as demo:
    with gr.Column():
        with gr.Row():
            with gr.Column():
                lq_img = gr.Image(label="Low-quality image", type="filepath")
                with gr.Group():
                    prompt = gr.Textbox(label="Prompt", value="")

                submit_btn = gr.Button("InstantIR magic!")
            output_img = gr.Image(label="InstantIR restored")
        submit_btn.click(
            fn=infer,
            inputs=[prompt, lq_img],
            outputs=[output_img]
        )
demo.launch(show_error=True)
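
For reference, a minimal sketch of calling the restoration function directly instead of going through the Gradio UI. The input path "examples/lq_input.png", the output filename, and the package-style import are hypothetical placeholders; importing the module runs the download and model-loading code at the top of the file first.

# hypothetical usage sketch, assuming functions/ is importable as a package
from functions.app_with_diffusers import infer

restored = infer(
    prompt="",                             # empty prompt falls back to the default PROMPT
    input_image="examples/lq_input.png",   # placeholder path to a degraded image
    steps=30,
    cfg_scale=7.0,
    seed=3407,
)
restored.save("restored.png")              # infer returns a PIL image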