Anonymous committed on
Commit
91fac5d
·
1 Parent(s): 1b39655

load in advance

Browse files
Files changed (1) hide show
  1. app.py +12 -17
app.py CHANGED
@@ -15,15 +15,17 @@ model_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
15
  model_ckpt_turbo = "stabilityai/sdxl-turbo"
16
  pipe = StableDiffusionXLPipeline.from_pretrained(model_ckpt, torch_dtype=dtype).to(device)
17
  pipe_turbo = StableDiffusionXLPipeline_Turbo.from_pretrained(model_ckpt_turbo, torch_dtype=dtype).to(device)
 
 
 
 
18
  torch.cuda.empty_cache()
19
 
20
  @spaces.GPU(duration=120)
21
- def infer_gpu_part(seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu, restart_steps):
22
  generator = torch.Generator(device='cuda')
23
  generator = generator.manual_seed(seed)
24
- if not disable_freeu:
25
- register_free_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
26
- register_free_crossattn_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
27
  result = pipe(prompt, negative_prompt=negative_prompt, generator=generator,
28
  num_inference_steps=ddim_steps, guidance_scale=guidance_scale,
29
  resolutions_list=resolutions_list, fast_mode=fast_mode, cosine_scale=cosine_scale,
@@ -31,13 +33,11 @@ def infer_gpu_part(seed, prompt, negative_prompt, ddim_steps, guidance_scale, re
31
  ).images[0]
32
  return result
33
 
34
- @spaces.GPU(duration=40)
35
- def infer_gpu_part_turbo(seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu, restart_steps):
36
  generator = torch.Generator(device='cuda')
37
  generator = generator.manual_seed(seed)
38
- if not disable_freeu:
39
- register_free_upblock2d(pipe_turbo, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
40
- register_free_crossattn_upblock2d(pipe_turbo, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
41
  result = pipe_turbo(prompt, negative_prompt=negative_prompt, generator=generator,
42
  num_inference_steps=ddim_steps, guidance_scale=guidance_scale,
43
  resolutions_list=resolutions_list, fast_mode=fast_mode, cosine_scale=cosine_scale,
@@ -51,7 +51,6 @@ def infer(prompt, output_size, ddim_steps, guidance_scale, cosine_scale, seed, o
51
  print(negative_prompt)
52
 
53
  disable_turbo = 'Disable Turbo' in options
54
- disable_freeu = 'Disable FreeU' in options
55
 
56
  if disable_turbo:
57
  fast_mode = True
@@ -66,9 +65,7 @@ def infer(prompt, output_size, ddim_steps, guidance_scale, cosine_scale, seed, o
66
  [2048, 1024]]
67
  restart_steps = [int(ddim_steps * 0.3)]
68
 
69
- # print('GPU starts')
70
- result = infer_gpu_part(seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu, restart_steps)
71
- # print('GPU ends')
72
 
73
  else:
74
  fast_mode = False
@@ -86,9 +83,7 @@ def infer(prompt, output_size, ddim_steps, guidance_scale, cosine_scale, seed, o
86
  [2048, 1024]]
87
  restart_steps = [int(ddim_steps * 0.5)] * 2
88
 
89
- # print('GPU starts')
90
- result = infer_gpu_part_turbo(seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu, restart_steps)
91
- # print('GPU ends')
92
 
93
  return result
94
 
@@ -220,7 +215,7 @@ with gr.Blocks(css=css) as demo:
220
  with gr.Accordion('Advanced Settings', open=False):
221
  with gr.Row():
222
  output_size = gr.Dropdown(["2048 x 2048", "1024 x 2048", "2048 x 1024"], value="2048 x 2048", label="Output Size (H x W)", info="Due to GPU constraints, run the demo locally for higher resolutions.", scale=3)
223
- options = gr.CheckboxGroup(['Disable Turbo', 'Disable FreeU'], label="Options", info="NOT recommended to change", scale=2)
224
  with gr.Row():
225
  ddim_steps = gr.Slider(label='DDIM Steps',
226
  minimum=2,
 
15
  model_ckpt_turbo = "stabilityai/sdxl-turbo"
16
  pipe = StableDiffusionXLPipeline.from_pretrained(model_ckpt, torch_dtype=dtype).to(device)
17
  pipe_turbo = StableDiffusionXLPipeline_Turbo.from_pretrained(model_ckpt_turbo, torch_dtype=dtype).to(device)
18
+ register_free_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
19
+ register_free_crossattn_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
20
+ register_free_upblock2d(pipe_turbo, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
21
+ register_free_crossattn_upblock2d(pipe_turbo, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
22
  torch.cuda.empty_cache()
23
 
24
  @spaces.GPU(duration=120)
25
+ def infer_gpu_part(seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, restart_steps):
26
  generator = torch.Generator(device='cuda')
27
  generator = generator.manual_seed(seed)
28
+
 
 
29
  result = pipe(prompt, negative_prompt=negative_prompt, generator=generator,
30
  num_inference_steps=ddim_steps, guidance_scale=guidance_scale,
31
  resolutions_list=resolutions_list, fast_mode=fast_mode, cosine_scale=cosine_scale,
 
33
  ).images[0]
34
  return result
35
 
36
+ @spaces.GPU(duration=30)
37
+ def infer_gpu_part_turbo(seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, restart_steps):
38
  generator = torch.Generator(device='cuda')
39
  generator = generator.manual_seed(seed)
40
+
 
 
41
  result = pipe_turbo(prompt, negative_prompt=negative_prompt, generator=generator,
42
  num_inference_steps=ddim_steps, guidance_scale=guidance_scale,
43
  resolutions_list=resolutions_list, fast_mode=fast_mode, cosine_scale=cosine_scale,
 
51
  print(negative_prompt)
52
 
53
  disable_turbo = 'Disable Turbo' in options
 
54
 
55
  if disable_turbo:
56
  fast_mode = True
 
65
  [2048, 1024]]
66
  restart_steps = [int(ddim_steps * 0.3)]
67
 
68
+ result = infer_gpu_part(seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, restart_steps)
 
 
69
 
70
  else:
71
  fast_mode = False
 
83
  [2048, 1024]]
84
  restart_steps = [int(ddim_steps * 0.5)] * 2
85
 
86
+ result = infer_gpu_part_turbo(seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, restart_steps)
 
 
87
 
88
  return result
89
 
 
215
  with gr.Accordion('Advanced Settings', open=False):
216
  with gr.Row():
217
  output_size = gr.Dropdown(["2048 x 2048", "1024 x 2048", "2048 x 1024"], value="2048 x 2048", label="Output Size (H x W)", info="Due to GPU constraints, run the demo locally for higher resolutions.", scale=3)
218
+ options = gr.CheckboxGroup(['Disable Turbo'], label="Options", info="NOT recommended to change", scale=2)
219
  with gr.Row():
220
  ddim_steps = gr.Slider(label='DDIM Steps',
221
  minimum=2,