multimodalart committed
Commit 5f8a996
1 Parent(s): d51771a

Create app.py

Files changed (1):
1. app.py +39 -0
app.py ADDED
@@ -0,0 +1,39 @@
+ import gradio as gr
+ import spaces
+ import torch
+ from diffusers import DiffusionPipeline, DDIMScheduler, AutoencoderKL
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
+ from hidiffusion import apply_hidiffusion, remove_hidiffusion
+ from transformers import CLIPFeatureExtractor
+
+ # Load the VAE, safety checker and feature extractor shared with the pipeline (fp16 to match the pipeline dtype).
+ vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)
+ safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker", torch_dtype=torch.float16)
+ feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")
+
+ pretrain_model = "SG161222/Realistic_Vision_V5.1_noVAE"
+ scheduler = DDIMScheduler.from_pretrained(pretrain_model, subfolder="scheduler")
+ pipe = DiffusionPipeline.from_pretrained(pretrain_model, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, vae=vae, torch_dtype=torch.float16).to("cuda")
+
+ # Optional: enable_xformers_memory_efficient_attention can save memory and increase inference speed;
+ # enable_model_cpu_offload and enable_vae_tiling can save memory.
+ # pipe.enable_model_cpu_offload()
+ # pipe.enable_vae_tiling()
+
+ # Apply HiDiffusion with a single line of code.
+ apply_hidiffusion(pipe)
+
+ @spaces.GPU
+ def run_hidiffusion(prompt, negative_prompt):
+     # Generate a single 1024x1024 image with the HiDiffusion-patched pipeline.
+     return pipe(prompt, guidance_scale=7.5, height=1024, width=1024, eta=1.0, negative_prompt=negative_prompt).images[0]
+
+ with gr.Blocks() as demo:
+     prompt = gr.Textbox(label="Prompt")
+     negative_prompt = gr.Textbox(label="Negative prompt")
+     btn = gr.Button("Run")
+     output = gr.Image()
+
+     btn.click(fn=run_hidiffusion, inputs=[prompt, negative_prompt], outputs=[output])
+
+ demo.launch()
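Note that remove_hidiffusion is imported above but never called; it undoes the patch that apply_hidiffusion installs. A minimal sketch of how it could be used later, assuming the pipe object created in app.py:

    # Revert pipe to its original, unpatched behavior.
    remove_hidiffusion(pipe)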