John6666 committed on
Commit
30e951c
·
verified ·
1 Parent(s): 32d3c16

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -4,7 +4,7 @@ import numpy as np
4
 
5
  # DiffuseCraft
6
  from dc import (infer, _infer, pass_result, get_diffusers_model_list, get_samplers, save_image_history,
7
- get_vaes, enable_model_recom_prompt, enable_diffusers_model_detail, extract_exif_data, esrgan_upscale, UPSCALER_KEYS,
8
  preset_quality, preset_styles, process_style_prompt, get_all_lora_tupled_list, update_loras, apply_lora_prompt,
9
  download_my_lora, search_civitai_lora, update_civitai_selection, select_civitai_lora, search_civitai_lora_json,
10
  get_t2i_model_info, get_civitai_tag, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL,
@@ -204,7 +204,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
204
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
205
  guidance_scale, num_inference_steps, model_name,
206
  lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
207
- sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type],
208
  outputs=[result],
209
  queue=True,
210
  show_progress="full",
@@ -217,7 +217,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
217
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
218
  guidance_scale, num_inference_steps, model_name,
219
  lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
220
- sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type],
221
  outputs=[result],
222
  queue=False,
223
  show_api=True,
@@ -240,7 +240,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
240
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
241
  guidance_scale, num_inference_steps, model_name,
242
  lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
243
- sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type],
244
  outputs=[result],
245
  queue=True,
246
  show_progress="full",
@@ -290,7 +290,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
290
  )
291
  lora_search_civitai_gallery.select(update_civitai_selection, None, [lora_search_civitai_result], queue=False, show_api=False)
292
 
293
- recom_prompt.change(enable_model_recom_prompt, [recom_prompt], [recom_prompt], queue=False, show_api=False)
294
  gr.on(
295
  triggers=[quality_selector.change, style_selector.change],
296
  fn=process_style_prompt,
@@ -301,7 +301,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
301
  show_api=False,
302
  )
303
 
304
- model_detail.change(enable_diffusers_model_detail, [model_detail, model_name], [model_detail, model_name], queue=False, show_api=False)
305
  model_name.change(get_t2i_model_info, [model_name], [model_info], queue=False, show_api=False)
306
 
307
  chat_model.change(select_dolphin_model, [chat_model, state], [chat_model, chat_format, chat_model_info, state], queue=True, show_progress="full", show_api=False)\
 
4
 
5
  # DiffuseCraft
6
  from dc import (infer, _infer, pass_result, get_diffusers_model_list, get_samplers, save_image_history,
7
+ get_vaes, enable_diffusers_model_detail, extract_exif_data, esrgan_upscale, UPSCALER_KEYS,
8
  preset_quality, preset_styles, process_style_prompt, get_all_lora_tupled_list, update_loras, apply_lora_prompt,
9
  download_my_lora, search_civitai_lora, update_civitai_selection, select_civitai_lora, search_civitai_lora_json,
10
  get_t2i_model_info, get_civitai_tag, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL,
 
204
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
205
  guidance_scale, num_inference_steps, model_name,
206
  lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
207
+ sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type, recom_prompt],
208
  outputs=[result],
209
  queue=True,
210
  show_progress="full",
 
217
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
218
  guidance_scale, num_inference_steps, model_name,
219
  lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
220
+ sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type, recom_prompt],
221
  outputs=[result],
222
  queue=False,
223
  show_api=True,
 
240
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
241
  guidance_scale, num_inference_steps, model_name,
242
  lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
243
+ sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type, recom_prompt],
244
  outputs=[result],
245
  queue=True,
246
  show_progress="full",
 
290
  )
291
  lora_search_civitai_gallery.select(update_civitai_selection, None, [lora_search_civitai_result], queue=False, show_api=False)
292
 
293
+ #recom_prompt.change(enable_model_recom_prompt, [recom_prompt], [recom_prompt], queue=False, show_api=False)
294
  gr.on(
295
  triggers=[quality_selector.change, style_selector.change],
296
  fn=process_style_prompt,
 
301
  show_api=False,
302
  )
303
 
304
+ model_detail.change(enable_diffusers_model_detail, [model_detail, model_name, state], [model_detail, model_name, state], queue=False, show_api=False)
305
  model_name.change(get_t2i_model_info, [model_name], [model_info], queue=False, show_api=False)
306
 
307
  chat_model.change(select_dolphin_model, [chat_model, state], [chat_model, chat_format, chat_model_info, state], queue=True, show_progress="full", show_api=False)\