jbilcke-hf committed
Commit 7db3ca3
Parent(s): 392ea42

Update app.py

Files changed (1): app.py (+48 -124)
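
This commit swaps the SD-XL base + refiner pipeline for segmind/SSD-1B driven by the latent-consistency/lcm-ssd-1b UNet and an LCMScheduler. The refiner checkbox, its sliders, the refiner-specific generate() arguments, and the USE_TORCH_COMPILE / ENABLE_CPU_OFFLOAD toggles are all removed, and the defaults drop to 4 inference steps at guidance scale 1.0, which suits LCM-distilled models: they sample in a handful of steps with classifier-free guidance effectively disabled. A standalone sketch of the new pipeline follows the diff.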
app.py CHANGED
@@ -1,15 +1,12 @@
 #!/usr/bin/env python
 
-from __future__ import annotations
-
 import os
 import random
-
 import gradio as gr
 import numpy as np
 import PIL.Image
 import torch
-from diffusers import DiffusionPipeline
+from diffusers import DiffusionPipeline, UNet2DConditionModel, LCMScheduler
 
 DESCRIPTION = 'This space is an API service meant to be used by VideoChain and VideoQuest.\nWant to use this space for yourself? Please use the original code: [https://huggingface.co/spaces/hysts/SD-XL](https://huggingface.co/spaces/hysts/SD-XL)'
 if not torch.cuda.is_available():
@@ -17,37 +14,27 @@ if not torch.cuda.is_available():
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = int(os.getenv('MAX_IMAGE_SIZE', '1024'))
-USE_TORCH_COMPILE = os.getenv('USE_TORCH_COMPILE') == '1'
-ENABLE_CPU_OFFLOAD = os.getenv('ENABLE_CPU_OFFLOAD') == '1'
 SECRET_TOKEN = os.getenv('SECRET_TOKEN', 'default_secret')
 
 device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
 if torch.cuda.is_available():
-    pipe = DiffusionPipeline.from_pretrained(
-        'stabilityai/stable-diffusion-xl-base-1.0',
-        torch_dtype=torch.float16,
-        use_safetensors=True,
-        variant='fp16')
-    refiner = DiffusionPipeline.from_pretrained(
-        'stabilityai/stable-diffusion-xl-refiner-1.0',
+    unet = UNet2DConditionModel.from_pretrained(
+        "latent-consistency/lcm-ssd-1b",
         torch_dtype=torch.float16,
-        use_safetensors=True,
-        variant='fp16')
+        variant="fp16"
+    )
 
-    if ENABLE_CPU_OFFLOAD:
-        pipe.enable_model_cpu_offload()
-        refiner.enable_model_cpu_offload()
-    else:
-        pipe.to(device)
-        refiner.to(device)
+    pipe = DiffusionPipeline.from_pretrained(
+        "segmind/SSD-1B",
+        unet=unet,
+        torch_dtype=torch.float16,
+        variant="fp16"
+    )
 
-    if USE_TORCH_COMPILE:
-        pipe.unet = torch.compile(pipe.unet,
-                                  mode='reduce-overhead',
-                                  fullgraph=True)
+    pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
+    pipe.to(device)
 else:
     pipe = None
-    refiner = None
 
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     if randomize_seed:
@@ -65,11 +52,8 @@ def generate(prompt: str,
              seed: int = 0,
              width: int = 1024,
             height: int = 1024,
-             guidance_scale_base: float = 5.0,
-             guidance_scale_refiner: float = 5.0,
-             num_inference_steps_base: int = 50,
-             num_inference_steps_refiner: int = 50,
-             apply_refiner: bool = False,
+             guidance_scale: float = 1.0,
+             num_inference_steps: int = 4,
              secret_token: str = '') -> PIL.Image.Image:
     if secret_token != SECRET_TOKEN:
         raise gr.Error(
@@ -84,37 +68,16 @@ def generate(prompt: str,
     if not use_negative_prompt_2:
         negative_prompt_2 = None  # type: ignore
 
-    if not apply_refiner:
-        return pipe(prompt=prompt,
-                    negative_prompt=negative_prompt,
-                    prompt_2=prompt_2,
-                    negative_prompt_2=negative_prompt_2,
-                    width=width,
-                    height=height,
-                    guidance_scale=guidance_scale_base,
-                    num_inference_steps=num_inference_steps_base,
-                    generator=generator,
-                    output_type='pil').images[0]
-    else:
-        latents = pipe(prompt=prompt,
-                       negative_prompt=negative_prompt,
-                       prompt_2=prompt_2,
-                       negative_prompt_2=negative_prompt_2,
-                       width=width,
-                       height=height,
-                       guidance_scale=guidance_scale_base,
-                       num_inference_steps=num_inference_steps_base,
-                       generator=generator,
-                       output_type='latent').images
-        image = refiner(prompt=prompt,
-                        negative_prompt=negative_prompt,
-                        prompt_2=prompt_2,
-                        negative_prompt_2=negative_prompt_2,
-                        guidance_scale=guidance_scale_refiner,
-                        num_inference_steps=num_inference_steps_refiner,
-                        image=latents,
-                        generator=generator).images[0]
-        return image
+    return pipe(prompt=prompt,
+                negative_prompt=negative_prompt,
+                prompt_2=prompt_2,
+                negative_prompt_2=negative_prompt_2,
+                width=width,
+                height=height,
+                guidance_scale=guidance_scale,
+                num_inference_steps=num_inference_steps,
+                generator=generator,
+                output_type='pil').images[0]
 
 with gr.Blocks(css='style.css') as demo:
     gr.Markdown(DESCRIPTION)
@@ -181,61 +144,33 @@ with gr.Blocks(css='style.css') as demo:
                 step=32,
                 value=1024,
             )
-            apply_refiner = gr.Checkbox(label='Apply refiner', value=False)
-            with gr.Row():
-                guidance_scale_base = gr.Slider(
-                    label='Guidance scale for base',
-                    minimum=1,
-                    maximum=20,
-                    step=0.1,
-                    value=5.0)
-                num_inference_steps_base = gr.Slider(
-                    label='Number of inference steps for base',
-                    minimum=10,
-                    maximum=100,
-                    step=1,
-                    value=50)
-            with gr.Row(visible=False) as refiner_params:
-                guidance_scale_refiner = gr.Slider(
-                    label='Guidance scale for refiner',
-                    minimum=1,
-                    maximum=20,
-                    step=0.1,
-                    value=5.0)
-                num_inference_steps_refiner = gr.Slider(
-                    label='Number of inference steps for refiner',
-                    minimum=10,
-                    maximum=100,
-                    step=1,
-                    value=50)
+            guidance_scale = gr.Slider(
+                label='Guidance scale',
+                minimum=1,
+                maximum=20,
+                step=0.1,
+                value=1.0)
+            num_inference_steps = gr.Slider(
+                label='Number of inference steps',
+                minimum=2,
+                maximum=8,
+                step=1,
+                value=4)
 
     use_negative_prompt.change(
         fn=lambda x: gr.update(visible=x),
         inputs=use_negative_prompt,
-        outputs=negative_prompt,
-        queue=False,
-        api_name=False,
+        outputs=negative_prompt
     )
     use_prompt_2.change(
         fn=lambda x: gr.update(visible=x),
         inputs=use_prompt_2,
-        outputs=prompt_2,
-        queue=False,
-        api_name=False,
+        outputs=prompt_2
     )
     use_negative_prompt_2.change(
         fn=lambda x: gr.update(visible=x),
         inputs=use_negative_prompt_2,
-        outputs=negative_prompt_2,
-        queue=False,
-        api_name=False,
-    )
-    apply_refiner.change(
-        fn=lambda x: gr.update(visible=x),
-        inputs=apply_refiner,
-        outputs=refiner_params,
-        queue=False,
-        api_name=False,
+        outputs=negative_prompt_2
     )
 
     inputs = [
@@ -249,19 +184,14 @@ with gr.Blocks(css='style.css') as demo:
         seed,
         width,
         height,
-        guidance_scale_base,
-        guidance_scale_refiner,
-        num_inference_steps_base,
-        num_inference_steps_refiner,
-        apply_refiner,
+        guidance_scale,
+        num_inference_steps,
         secret_token,
     ]
     prompt.submit(
         fn=randomize_seed_fn,
         inputs=[seed, randomize_seed],
-        outputs=seed,
-        queue=False,
-        api_name=False,
+        outputs=seed
     ).then(
         fn=generate,
         inputs=inputs,
@@ -271,25 +201,19 @@ with gr.Blocks(css='style.css') as demo:
     negative_prompt.submit(
         fn=randomize_seed_fn,
         inputs=[seed, randomize_seed],
-        outputs=seed,
-        queue=False,
-        api_name=False,
+        outputs=seed
     ).then(
         fn=generate,
         inputs=inputs,
-        outputs=result,
-        api_name=False,
+        outputs=result
     )
     run_button.click(
         fn=randomize_seed_fn,
         inputs=[seed, randomize_seed],
-        outputs=seed,
-        queue=False,
-        api_name=False,
+        outputs=seed
     ).then(
         fn=generate,
         inputs=inputs,
-        outputs=result,
-        api_name=False,
+        outputs=result
     )
-demo.queue(max_size=6).launch()
+demo.queue(max_size=6).launch()
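
For reference, a minimal standalone sketch of the pipeline this commit switches to. The model IDs, the scheduler swap, and the defaults (4 steps, guidance scale 1.0) come straight from the diff above; the prompt, output filename, and hard-coded 'cuda' device are illustrative assumptions, and a diffusers release that ships LCMScheduler is assumed to be installed.

import torch
from diffusers import DiffusionPipeline, UNet2DConditionModel, LCMScheduler

# LCM-distilled UNet for SSD-1B, loaded exactly as in the new app.py
unet = UNet2DConditionModel.from_pretrained(
    'latent-consistency/lcm-ssd-1b',
    torch_dtype=torch.float16,
    variant='fp16')

# Base SSD-1B pipeline with the distilled UNet swapped in
pipe = DiffusionPipeline.from_pretrained(
    'segmind/SSD-1B',
    unet=unet,
    torch_dtype=torch.float16,
    variant='fp16')
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.to('cuda')  # assumes a CUDA GPU, matching the Space's GPU-only path

# LCM sampling: a handful of steps, and guidance_scale=1.0 effectively
# disables classifier-free guidance (the new defaults in app.py).
image = pipe(prompt='an astronaut riding a horse',  # illustrative prompt
             num_inference_steps=4,
             guidance_scale=1.0).images[0]
image.save('out.png')  # illustrative output path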