|
import torch |
|
from diffusers import AutoencoderKL |
|
from diffusers import StableDiffusionUpscalePipeline |
|
from PIL import Image |
|
|
|
# Compute device for the diffusion pipeline.
# NOTE(review): hard-coded to CUDA — assumes a GPU is present; no CPU fallback here.
device = "cuda"

# Fixed RNG seed so repeated upscaling runs are reproducible (see upscale()).
seed = 100
|
def execute(input_image, prompt="beautiful girl", resize_to=None):
    """Upscale *input_image* 4x with the Stable Diffusion x4 upscaler.

    Args:
        input_image: PIL Image to upscale.
        prompt: Text prompt guiding the upscaler. The default preserves the
            previously hard-coded prompt, so existing callers are unaffected.
        resize_to: Optional ``(width, height)`` tuple; when given, the input
            is resized to this size before upscaling. ``None`` (the default)
            keeps the original size, matching the old behavior where the
            resize branch was unreachable.

    Returns:
        The upscaled PIL Image produced by the pipeline.
    """
    model_id = "stabilityai/stable-diffusion-x4-upscaler"

    # fp16 weights halve memory use; the pipeline runs on the module-level
    # `device` (assumed CUDA — fp16 is impractical on CPU).
    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        model_id, torch_dtype=torch.float16, variant="fp16"
    )
    pipe = pipe.to(device)

    # Memory savers: attention slicing always works; xformers is an optional
    # dependency, so guard it — previously a missing xformers install crashed
    # the whole run instead of falling back to slicing.
    pipe.enable_attention_slicing()
    try:
        pipe.enable_xformers_memory_efficient_attention()
    except Exception:
        pass  # best-effort: attention slicing above remains active

    # Tiled VAE decode keeps peak VRAM low for large output images.
    pipe.vae.enable_tiling()

    # Optional pre-resize. The original code had both dimensions hard-coded
    # to 0, making this branch dead code; it is now an opt-in parameter.
    low_res_img = input_image
    if resize_to is not None:
        low_res_img = input_image.resize(resize_to)

    return upscale(pipe, prompt, low_res_img)
|
|
|
def upscale(pipe, prompt, img, step=50, guidance_scale=7.5):
    """Run one seeded pass of *pipe* on *img* and return the first output image.

    Uses the module-level `seed` and `device` so results are reproducible
    across runs.
    """
    rng = torch.Generator(device).manual_seed(seed)
    result = pipe(
        prompt=prompt,
        generator=rng,
        image=img,
        num_inference_steps=step,
        guidance_scale=guidance_scale,
    )
    return result.images[0]
|
|
|
|
|
if __name__ == "__main__":
    # Script entry point: upscale sample.jpg from the working directory
    # and write the result next to it.
    source = Image.open("sample.jpg")
    result = execute(source)
    result.save("output.jpg")
|
|
|
|