zwl committed
Commit dd6aa4e
1 Parent(s): 0b4f323

update app.py: build a shared UniPCMultistepScheduler at startup, pass negative prompt and output size through to the pipeline call, and widen the size sliders to 1024

Files changed (1)
  1. app.py +22 -10
app.py CHANGED
@@ -4,7 +4,18 @@ import torch
 from PIL import Image
 import os
 
-
+scheduler = UniPCMultistepScheduler(
+    beta_start=0.00085,
+    beta_end=0.012,
+    beta_schedule="scaled_linear",
+    num_train_timesteps=1000,
+    trained_betas=None,
+    predict_epsilon=True,
+    thresholding=False,
+    predict_x0=True,
+    solver_type="bh2",
+    lower_order_final=True,
+)
 
 class Model:
     def __init__(self, name, path, prefix):
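
One note on the new constructor: the `predict_epsilon` flag comes from early UniPC ports; in mainline diffusers it was superseded by `prediction_type`. A forward-compatible sketch (an assumption, not part of this commit), assuming a recent mainline diffusers release:

```python
from diffusers import UniPCMultistepScheduler

# Same settings as the commit, with `prediction_type` replacing the
# deprecated `predict_epsilon` flag.
scheduler = UniPCMultistepScheduler(
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="scaled_linear",
    num_train_timesteps=1000,
    trained_betas=None,
    prediction_type="epsilon",
    thresholding=False,
    predict_x0=True,
    solver_type="bh2",
    lower_order_final=True,
)
```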
@@ -32,8 +43,8 @@ if torch.cuda.is_available():
     for model in models:
         try:
             unet = UNet2DConditionModel.from_pretrained(model.path, subfolder="unet", torch_dtype=torch.float16, use_auth_token=auth_token)
-            model.pipe_t2i = StableDiffusionPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, use_auth_token=auth_token)
-            model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, use_auth_token=auth_token)
+            model.pipe_t2i = StableDiffusionPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler, use_auth_token=auth_token)
+            model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler, use_auth_token=auth_token)
         except:
             models.remove(model)
     pipe = models[0].pipe_t2i
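
Two caveats about this loop, both pre-existing rather than introduced here. First, diffusers schedulers are stateful (`set_timesteps` mutates them), so handing the same `scheduler` instance to every pipeline couples them all; a sketch of per-pipeline copies via `from_config` (an assumption, not what the commit does):

```python
# Hypothetical variant: each pipeline gets its own scheduler clone built
# from the shared config, so no two pipelines share mutable state.
model.pipe_t2i = StableDiffusionPipeline.from_pretrained(
    model.path, unet=unet, vae=vae, torch_dtype=torch.float16,
    scheduler=UniPCMultistepScheduler.from_config(scheduler.config),
    use_auth_token=auth_token,
)
```

With `demo.queue(concurrency_count=1)` requests are serialized anyway, so this is precautionary. Second, `models.remove(model)` inside `for model in models:` skips the element after each removal; iterating over a copy (`for model in list(models):`) avoids that.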
@@ -78,7 +89,6 @@ def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, g
 
     pipe.to("cpu")
     pipe = current_model.pipe_t2i
-    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
 
     if torch.cuda.is_available():
         pipe = pipe.to("cuda")
@@ -87,8 +97,12 @@ def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, g
     prompt = current_model.prefix + prompt
     result = pipe(
         prompt,
+        negative_prompt = neg_prompt,
+        # num_images_per_prompt=n_images,
         num_inference_steps = int(steps),
         guidance_scale = guidance,
+        width = width,
+        height = height,
         generator = generator)
 
     return replace_nsfw_images(result)
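
For context on the untouched `generator = generator` line: the seed slider is typically mapped to a `torch.Generator` before this call. A minimal sketch, assuming `seed == 0` means "random" as the slider label says:

```python
import torch

# Hypothetical mapping: deterministic generator for a nonzero seed,
# None (a fresh random seed inside the pipeline) otherwise.
device = "cuda" if torch.cuda.is_available() else "cpu"
generator = torch.Generator(device).manual_seed(int(seed)) if seed != 0 else None
```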
@@ -103,7 +117,6 @@ def img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, w
 
     pipe.to("cpu")
     pipe = current_model.pipe_i2i
-    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
 
     if torch.cuda.is_available():
         pipe = pipe.to("cuda")
@@ -224,7 +237,6 @@ with gr.Blocks(css=css) as demo:
         with gr.Tab("Options"):
             with gr.Group():
                 neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
-                # neg_prompt = ""
 
                 # n_images = gr.Slider(label="Images", value=1, minimum=1, maximum=4, step=1)
 
@@ -233,8 +245,8 @@ with gr.Blocks(css=css) as demo:
                 steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=100, step=1)
 
                 with gr.Row():
-                    width = gr.Slider(label="Width", value=512, minimum=64, maximum=512, step=8)
-                    height = gr.Slider(label="Height", value=512, minimum=64, maximum=512, step=8)
+                    width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
+                    height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8)
 
                 seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
 
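The wider sliders keep `step=8`, which matters: Stable Diffusion's VAE downsamples by a factor of 8, so both dimensions must be multiples of 8. A server-side guard is a cheap backstop if a caller bypasses the UI (a sketch, with a hypothetical helper name):

```python
# Hypothetical guard mirroring the slider bounds (64..1024, multiples of 8).
def snap_to_grid(px: int, lo: int = 64, hi: int = 1024) -> int:
    return max(lo, min(hi, (int(px) // 8) * 8))
```
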
@@ -245,7 +257,7 @@ with gr.Blocks(css=css) as demo:
 
     # model_name.change(lambda x: gr.update(visible = x == models[0].name), inputs=model_name, outputs=custom_model_group)
 
-    inputs = [model_name, prompt, guidance, steps, width, height, seed, image, strength]
+    inputs = [model_name, prompt, guidance, steps, width, height, seed, image, strength, neg_prompt]
     prompt.submit(inference, inputs=inputs, outputs=image_out)
 
     generate.click(inference, inputs=inputs, outputs=image_out)
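
Appending `neg_prompt` to `inputs` only works if `inference` takes it in the same position: Gradio binds components to handler parameters positionally. The signature this list implies (hypothetical, reconstructed from the list order rather than shown in this diff):

```python
# Hypothetical signature matching the new `inputs` order exactly.
def inference(model_name, prompt, guidance, steps, width, height,
              seed, img, strength, neg_prompt):
    ...
```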
@@ -259,4 +271,4 @@ with gr.Blocks(css=css) as demo:
     ''')
 
 demo.queue(concurrency_count=1)
-demo.launch(debug=False, share=False)
+demo.launch(debug=False, share=False)
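
A forward-compatibility note on the closing lines: `concurrency_count` was removed from `queue()` in Gradio 4; the rough equivalent there (an assumption, not part of this commit) is:

```python
# Gradio 4.x spelling of "one request at a time".
demo.queue(default_concurrency_limit=1)
demo.launch(debug=False, share=False)
```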