anzorq committed
Commit 45d8fb2
1 Parent(s): 2bc439a

Update app.py

Files changed (1)
  1. app.py +16 -8
app.py CHANGED
@@ -81,6 +81,10 @@ if torch.cuda.is_available():
 
 device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
 
+def error_str(error, title="Error"):
+  return f"""#### {title}
+              {error}""" if error else ""
+
 def custom_model_changed(path):
   models[0].path = path
   global current_model
@@ -102,10 +106,13 @@ def inference(model_name, prompt, guidance, steps, width=512, height=512, seed=0
 
   generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
 
-  if img is not None:
-    return img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator)
-  else:
-    return txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator)
+  try:
+    if img is not None:
+      return img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None
+    else:
+      return txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator), None
+  except Exception as e:
+    return None, error_str(e)
 
 def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator):
 
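The change to `inference` above makes failures visible in the UI instead of crashing the request: the handler now always returns a pair of (image, error text), where the error text comes from the new `error_str` helper and is `None` on success. A minimal sketch of that pattern, with a hypothetical `run_model` standing in for the Space's real `txt_to_img`/`img_to_img` calls:

```python
# Sketch of the (result, error) return contract. Only error_str and the
# try/except shape mirror the diff; run_model is a made-up placeholder.

def error_str(error, title="Error"):
    # Render an exception as a small Markdown block, or "" when there is no error.
    return f"""#### {title}
            {error}""" if error else ""

def run_model(prompt):
    # Hypothetical stand-in that always fails, to exercise the error path.
    raise RuntimeError(f"no pipeline loaded for prompt {prompt!r}")

def inference(prompt):
    try:
        return run_model(prompt), None   # success: result plus an empty error slot
    except Exception as e:
        return None, error_str(e)        # failure: no result, formatted error text

print(inference("test prompt"))
# -> (None, '#### Error\n            no pipeline loaded for prompt ...')
```

Returning `None` for the unused half of the pair keeps both Gradio output components updated no matter which branch runs.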
 
@@ -224,6 +231,7 @@ with gr.Blocks(css=css) as demo:
       # gallery = gr.Gallery(
       #     label="Generated images", show_label=False, elem_id="gallery"
       # ).style(grid=[1], height="auto")
+      error_output = gr.Markdown()
 
     with gr.Column(scale=45):
       with gr.Tab("Options"):
@@ -253,8 +261,9 @@ with gr.Blocks(css=css) as demo:
   # n_images.change(lambda n: gr.Gallery().style(grid=[2 if n > 1 else 1], height="auto"), inputs=n_images, outputs=gallery)
 
   inputs = [model_name, prompt, guidance, steps, width, height, seed, image, strength, neg_prompt]
-  prompt.submit(inference, inputs=inputs, outputs=image_out)
-  generate.click(inference, inputs=inputs, outputs=image_out)
+  outputs = [image_out, error_output]
+  prompt.submit(inference, inputs=inputs, outputs=outputs)
+  generate.click(inference, inputs=inputs, outputs=outputs)
 
   ex = gr.Examples([
     [models[7].name, "tiny cute and adorable kitten adventurer dressed in a warm overcoat with survival gear on a winters day", 7.5, 50],
@@ -262,7 +271,7 @@ with gr.Blocks(css=css) as demo:
     [models[5].name, "portrait of a beautiful alyx vance half life", 10, 50],
     [models[6].name, "Aloy from Horizon: Zero Dawn, half body portrait, smooth, detailed armor, beautiful face, illustration", 7.0, 45],
     [models[5].name, "fantasy portrait painting, digital art", 4.0, 30],
-  ], [model_name, prompt, guidance, steps, seed], image_out, inference, cache_examples=False)
+  ], inputs=[model_name, prompt, guidance, steps, seed], outputs=outputs, fn=inference, cache_examples=False)
 
   gr.HTML("""
   <div style="border-top: 1px solid #303030;">
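On the UI side, the error text is routed to the new `gr.Markdown` component, so every trigger (`prompt.submit`, `generate.click`, and the `gr.Examples` runner) now writes to a two-element `outputs` list; the `gr.Examples` call also switches to keyword arguments so `inputs`, `outputs`, and `fn` are passed explicitly. A minimal sketch of that wiring, assuming a dummy `generate_fn` in place of the Space's `inference`:

```python
# Minimal Gradio wiring for the two-output pattern. generate_fn is a dummy
# handler that follows the same (image, error) contract as inference().
import gradio as gr

def generate_fn(prompt):
    if not prompt.strip():
        return None, "#### Error\nPrompt must not be empty."
    return None, ""  # a real handler would return a generated image here

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    generate = gr.Button("Generate")
    image_out = gr.Image(label="Result")
    error_output = gr.Markdown()         # stays empty unless the handler reports an error

    outputs = [image_out, error_output]  # every trigger updates both components
    prompt.submit(generate_fn, inputs=[prompt], outputs=outputs)
    generate.click(generate_fn, inputs=[prompt], outputs=outputs)

demo.launch()
```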
@@ -280,4 +289,3 @@ print(f"Space built in {time.time() - start_time:.2f} seconds")
 if not is_colab:
   demo.queue(concurrency_count=1)
 demo.launch(debug=is_colab, share=is_colab)
-