File size: 1,294 Bytes
70546b5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e38f741
 
 
 
70546b5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
import gradio as gr
from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image
from diffusers.utils import load_image
import torch

if torch.cuda.is_available():
  device = "cuda"
elif torch.backends.mps.is_available():
  device = "mps"
else:
  device = "cpu"


# SDXL-Turbo ships fp16 weights, which are fine on GPU backends, but many
# CPU kernels are not implemented for half precision ("LayerNormKernelImpl
# not implemented for 'Half'"), so fall back to full precision on CPU.
if device == "cpu":
  _dtype, _variant = torch.float32, None
else:
  _dtype, _variant = torch.float16, "fp16"

# Shared txt2img / img2img pipelines built from the same checkpoint.
pipes = {
  "txt2img": AutoPipelineForText2Image.from_pretrained(
      "stabilityai/sdxl-turbo", torch_dtype=_dtype, variant=_variant
  ).to(device),
  "img2img": AutoPipelineForImage2Image.from_pretrained(
      "stabilityai/sdxl-turbo", torch_dtype=_dtype, variant=_variant
  ).to(device),
}

# enable_model_cpu_offload() pages GPU-resident submodules out to system RAM
# between forward passes to save VRAM; it requires an accelerator device, so
# it was a bug to call it when device == "cpu". Enable it only on CUDA,
# where it actually reduces VRAM pressure.
if device == "cuda":
  for _pipe in pipes.values():
    _pipe.enable_model_cpu_offload()


def run(prompt, image):
  """Generate a picture for *prompt*.

  When *image* is None, a single-step txt2img turbo pass is used.
  Otherwise the input image is resized to 512x512 and restyled via the
  img2img pipeline (2 steps at strength 0.5 — one effective turbo step).
  Returns a PIL image in both cases.
  """
  print(f"prompt={prompt}, image={image}")

  # No source image: plain text-to-image generation.
  if image is None:
    result = pipes["txt2img"](prompt=prompt, num_inference_steps=1, guidance_scale=0.0)
    return result.images[0]

  # Source image supplied: normalize its size, then run image-to-image.
  image = image.resize((512,512))
  print(f"img2img image={image}")
  result = pipes["img2img"](prompt, image=image, num_inference_steps=2, strength=0.5, guidance_scale=0.0)
  return result.images[0]

# Live-updating Gradio UI: a prompt box plus an optional source image;
# output is displayed at 512x512.
prompt_input = gr.Textbox(label="Prompt")
image_input = gr.Image(type="pil")
image_output = gr.Image(width=512, height=512)

demo = gr.Interface(
    run,
    inputs=[prompt_input, image_input],
    outputs=image_output,
    live=True,
)
#demo.dependencies[0]["show_progress"] = "minimal"
demo.launch()