John6666 committed on
Commit
900721f
1 Parent(s): 804d122

Upload 22 files

Browse files
Files changed (3) hide show
  1. app.py +228 -55
  2. dc.py +47 -73
  3. env.py +4 -93
app.py CHANGED
@@ -9,7 +9,9 @@ from dc import (infer, _infer, pass_result, get_diffusers_model_list, get_sample
9
  download_my_lora, search_civitai_lora, update_civitai_selection, select_civitai_lora, search_civitai_lora_json,
10
  get_t2i_model_info, get_civitai_tag, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL,
11
  SCHEDULE_TYPE_OPTIONS, SCHEDULE_PREDICTION_TYPE_OPTIONS, preprocessor_tab, SDXL_TASK, TASK_MODEL_LIST,
12
- PROMPT_W_OPTIONS)
 
 
13
  # Translator
14
  from llmdolphin import (dolphin_respond_auto, dolphin_parse_simple,
15
  get_llm_formats, get_dolphin_model_format, get_dolphin_models,
@@ -63,6 +65,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
63
 
64
  result = gr.Image(label="Result", elem_id="result", format="png", type="filepath", show_label=False, interactive=False,
65
  show_download_button=True, show_share_button=False, container=True)
 
66
  with gr.Accordion("History", open=False):
67
  history_gallery = gr.Gallery(label="History", columns=6, object_fit="contain", format="png", interactive=False, show_share_button=False,
68
  show_download_button=True)
@@ -70,42 +73,48 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
70
  history_clear_button = gr.Button(value="Clear History", variant="secondary")
71
  history_clear_button.click(lambda: ([], []), None, [history_gallery, history_files], queue=False, show_api=False)
72
 
73
- with gr.Accordion("Advanced Settings", open=False):
74
- with gr.Row():
75
- negative_prompt = gr.Text(label="Negative prompt", lines=1, max_lines=6, placeholder="Enter a negative prompt", show_copy_button=True,
76
- value="(low quality, worst quality:1.2), very displeasing, watermark, signature, ugly")
77
-
78
- with gr.Row():
79
- seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
80
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
81
- gpu_duration = gr.Slider(label="GPU time duration (seconds)", minimum=5, maximum=240, value=59)
82
-
83
- with gr.Row():
84
- width = gr.Slider(label="Width", minimum=MIN_IMAGE_SIZE, maximum=MAX_IMAGE_SIZE, step=32, value=1024) # 832
85
- height = gr.Slider(label="Height", minimum=MIN_IMAGE_SIZE, maximum=MAX_IMAGE_SIZE, step=32, value=1024) # 1216
86
- guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=30.0, step=0.1, value=7)
87
- guidance_rescale = gr.Slider(label="CFG rescale", value=0., step=0.01, minimum=0., maximum=1.5)
88
- num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=100, step=1, value=28)
89
- pag_scale = gr.Slider(minimum=0.0, maximum=10.0, step=0.1, value=0.0, label="PAG Scale")
90
- clip_skip = gr.Checkbox(value=True, label="Layer 2 Clip Skip")
91
- free_u = gr.Checkbox(value=False, label="FreeU")
92
-
93
- with gr.Row():
94
- with gr.Column(scale=4):
95
- model_name = gr.Dropdown(label="Model", info="You can enter a huggingface model repo_id to want to use.",
96
- choices=get_diffusers_model_list(), value=get_diffusers_model_list()[0],
97
- allow_custom_value=True, interactive=True, min_width=320)
98
- model_info = gr.Markdown(elem_classes="info")
99
- with gr.Column(scale=1):
100
- model_detail = gr.Checkbox(label="Show detail of model in list", value=False)
101
 
102
- with gr.Row():
103
- sampler = gr.Dropdown(label="Sampler", choices=get_samplers(), value="Euler")
104
- schedule_type = gr.Dropdown(label="Schedule type", choices=SCHEDULE_TYPE_OPTIONS, value=SCHEDULE_TYPE_OPTIONS[0])
105
- schedule_prediction_type = gr.Dropdown(label="Discrete Sampling Type", choices=SCHEDULE_PREDICTION_TYPE_OPTIONS, value=SCHEDULE_PREDICTION_TYPE_OPTIONS[0])
106
- vae_model = gr.Dropdown(label="VAE Model", choices=get_vaes(), value=get_vaes()[0])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
107
 
108
- with gr.Accordion("LoRA", open=True, visible=True):
109
  def lora_dropdown(label, visible=True):
110
  return gr.Dropdown(label=label, choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320, visible=visible)
111
 
@@ -190,17 +199,136 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
190
  lora_search_civitai_result = gr.Dropdown(label="Search Results", choices=[("", "")], value="", allow_custom_value=True, visible=False)
191
  lora_download_url = gr.Textbox(label="LoRA's download URL", placeholder="https://civitai.com/api/download/models/28907", info="It has to be .safetensors files, and you can also download them from Hugging Face.", lines=1)
192
  lora_download = gr.Button("Get and set LoRA and apply to prompt")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
193
 
194
- with gr.Row():
195
- quality_selector = gr.Radio(label="Quality Tag Presets", interactive=True, choices=list(preset_quality.keys()), value="None", scale=3)
196
- style_selector = gr.Radio(label="Style Presets", interactive=True, choices=list(preset_styles.keys()), value="None", scale=3)
197
- recom_prompt = gr.Checkbox(label="Recommended prompt", value=True, scale=1)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
198
 
199
- with gr.Accordion("Other", open=True, visible=True):
200
- task = gr.Dropdown(label="Task", choices=SDXL_TASK, value=TASK_MODEL_LIST[0])
201
- prompt_syntax = gr.Dropdown(label="Prompt Syntax", choices=PROMPT_W_OPTIONS, value=PROMPT_W_OPTIONS[1][1])
202
-
203
- with gr.Accordion("Translation Settings", open=False):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
204
  chatbot = gr.Chatbot(render_markdown=False, visible=False) # component for auto-translation
205
  chat_model = gr.Dropdown(choices=get_dolphin_models(), value=get_dolphin_models()[0][1], allow_custom_value=True, label="Model")
206
  chat_model_info = gr.Markdown(value=get_dolphin_model_info(get_dolphin_models()[0][1]), label="Model info")
@@ -225,15 +353,34 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
225
  cache_examples=False,
226
  )
227
 
 
 
 
 
 
 
228
  gr.on( #lambda x: None, inputs=None, outputs=result).then(
229
  triggers=[run_button.click, prompt.submit],
230
  fn=infer,
231
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
232
  guidance_scale, num_inference_steps, model_name,
233
  lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt,
234
- lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt,
235
- sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type,
236
- clip_skip, pag_scale, free_u, guidance_rescale, gpu_duration, recom_prompt],
 
 
 
 
 
 
 
 
 
 
 
 
 
237
  outputs=[result],
238
  queue=True,
239
  show_progress="full",
@@ -246,9 +393,22 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
246
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
247
  guidance_scale, num_inference_steps, model_name,
248
  lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt,
249
- lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt,
250
- sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type,
251
- clip_skip, pag_scale, free_u, guidance_rescale, gpu_duration, recom_prompt],
 
 
 
 
 
 
 
 
 
 
 
 
 
252
  outputs=[result],
253
  queue=False,
254
  show_api=True,
@@ -271,9 +431,22 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
271
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
272
  guidance_scale, num_inference_steps, model_name,
273
  lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt,
274
- lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt,
275
- sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type,
276
- clip_skip, pag_scale, free_u, guidance_rescale, gpu_duration, recom_prompt],
 
 
 
 
 
 
 
 
 
 
 
 
 
277
  outputs=[result],
278
  queue=True,
279
  show_progress="full",
@@ -285,7 +458,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
285
 
286
  gr.on(
287
  triggers=[lora1.change, lora1_wt.change, lora2.change, lora2_wt.change, lora3.change, lora3_wt.change,
288
- lora4.change, lora4_wt.change, lora5.change, lora5_wt.change, lora6.change, lora6_wt.change, lora7.change, lora7_wt.change],
289
  fn=update_loras,
290
  inputs=[prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt],
291
  outputs=[prompt, lora1, lora1_wt, lora1_info, lora1_copy, lora1_md,
@@ -433,7 +606,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
433
  ).success(
434
  insert_recom_prompt, [output_text_pony, dummy_np, recom_pony], [output_text_pony, dummy_np], queue=False, show_api=False,
435
  ).success(lambda: (gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True)),
436
- None, [copy_btn, copy_btn_pony, copy_prompt_btn, copy_prompt_btn_pony], queue=False, show_api=False)
437
  copy_btn.click(gradio_copy_text, [output_text], js=COPY_ACTION_JS, show_api=False)
438
  copy_btn_pony.click(gradio_copy_text, [output_text_pony], js=COPY_ACTION_JS, show_api=False)
439
  copy_prompt_btn.click(gradio_copy_prompt, inputs=[output_text], outputs=[prompt], show_api=False)
 
9
  download_my_lora, search_civitai_lora, update_civitai_selection, select_civitai_lora, search_civitai_lora_json,
10
  get_t2i_model_info, get_civitai_tag, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL,
11
  SCHEDULE_TYPE_OPTIONS, SCHEDULE_PREDICTION_TYPE_OPTIONS, preprocessor_tab, SDXL_TASK, TASK_MODEL_LIST,
12
+ PROMPT_W_OPTIONS, POST_PROCESSING_SAMPLER, IP_ADAPTERS_SD, IP_ADAPTERS_SDXL, DIFFUSERS_CONTROLNET_MODEL,
13
+ TASK_AND_PREPROCESSORS, update_task_options, change_preprocessor_choices, get_ti_choices,
14
+ update_textual_inversion, set_textual_inversion_prompt, create_mask_now)
15
  # Translator
16
  from llmdolphin import (dolphin_respond_auto, dolphin_parse_simple,
17
  get_llm_formats, get_dolphin_model_format, get_dolphin_models,
 
65
 
66
  result = gr.Image(label="Result", elem_id="result", format="png", type="filepath", show_label=False, interactive=False,
67
  show_download_button=True, show_share_button=False, container=True)
68
+
69
  with gr.Accordion("History", open=False):
70
  history_gallery = gr.Gallery(label="History", columns=6, object_fit="contain", format="png", interactive=False, show_share_button=False,
71
  show_download_button=True)
 
73
  history_clear_button = gr.Button(value="Clear History", variant="secondary")
74
  history_clear_button.click(lambda: ([], []), None, [history_gallery, history_files], queue=False, show_api=False)
75
 
76
+ with gr.Accordion("Advanced Settings", open=True):
77
+ task = gr.Radio(label="Task", choices=SDXL_TASK, value=TASK_MODEL_LIST[0])
78
+ with gr.Tab("Model & Prompt"):
79
+ with gr.Row():
80
+ negative_prompt = gr.Text(label="Negative prompt", lines=1, max_lines=6, placeholder="Enter a negative prompt", show_copy_button=True,
81
+ value="(low quality, worst quality:1.2), very displeasing, watermark, signature, ugly")
82
+ with gr.Row():
83
+ with gr.Column(scale=4):
84
+ model_name = gr.Dropdown(label="Model", info="You can enter a Hugging Face model repo_id that you want to use.",
85
+ choices=get_diffusers_model_list(), value=get_diffusers_model_list()[0],
86
+ allow_custom_value=True, interactive=True, min_width=320)
87
+ model_info = gr.Markdown(elem_classes="info")
88
+ with gr.Column(scale=1):
89
+ model_detail = gr.Checkbox(label="Show detail of model in list", value=False)
90
+ with gr.Row():
91
+ quality_selector = gr.Radio(label="Quality Tag Presets", interactive=True, choices=list(preset_quality.keys()), value="None", scale=3)
92
+ style_selector = gr.Radio(label="Style Presets", interactive=True, choices=list(preset_styles.keys()), value="None", scale=3)
93
+ recom_prompt = gr.Checkbox(label="Recommended prompt", value=True, scale=1)
 
 
 
 
 
 
 
 
 
 
94
 
95
+ with gr.Tab("Generation Settings"):
96
+ with gr.Row():
97
+ seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
98
+ randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
99
+ gpu_duration = gr.Slider(label="GPU time duration (seconds)", minimum=5, maximum=240, value=59)
100
+ with gr.Row():
101
+ width = gr.Slider(label="Width", minimum=MIN_IMAGE_SIZE, maximum=MAX_IMAGE_SIZE, step=32, value=1024) # 832
102
+ height = gr.Slider(label="Height", minimum=MIN_IMAGE_SIZE, maximum=MAX_IMAGE_SIZE, step=32, value=1024) # 1216
103
+ guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=30.0, step=0.1, value=7)
104
+ guidance_rescale = gr.Slider(label="CFG rescale", value=0., step=0.01, minimum=0., maximum=1.5)
105
+ with gr.Row():
106
+ num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=100, step=1, value=28)
107
+ pag_scale = gr.Slider(minimum=0.0, maximum=10.0, step=0.1, value=0.0, label="PAG Scale")
108
+ clip_skip = gr.Checkbox(value=True, label="Layer 2 Clip Skip")
109
+ free_u = gr.Checkbox(value=False, label="FreeU")
110
+ with gr.Row():
111
+ sampler = gr.Dropdown(label="Sampler", choices=get_samplers(), value="Euler")
112
+ schedule_type = gr.Dropdown(label="Schedule type", choices=SCHEDULE_TYPE_OPTIONS, value=SCHEDULE_TYPE_OPTIONS[0])
113
+ schedule_prediction_type = gr.Dropdown(label="Discrete Sampling Type", choices=SCHEDULE_PREDICTION_TYPE_OPTIONS, value=SCHEDULE_PREDICTION_TYPE_OPTIONS[0])
114
+ vae_model = gr.Dropdown(label="VAE Model", choices=get_vaes(), value=get_vaes()[0])
115
+ prompt_syntax = gr.Dropdown(label="Prompt Syntax", choices=PROMPT_W_OPTIONS, value=PROMPT_W_OPTIONS[1][1])
116
 
117
+ with gr.Tab("LoRA"):
118
  def lora_dropdown(label, visible=True):
119
  return gr.Dropdown(label=label, choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320, visible=visible)
120
 
 
199
  lora_search_civitai_result = gr.Dropdown(label="Search Results", choices=[("", "")], value="", allow_custom_value=True, visible=False)
200
  lora_download_url = gr.Textbox(label="LoRA's download URL", placeholder="https://civitai.com/api/download/models/28907", info="It has to be .safetensors files, and you can also download them from Hugging Face.", lines=1)
201
  lora_download = gr.Button("Get and set LoRA and apply to prompt")
202
+
203
+ with gr.Tab("ControlNet / Img2img / Inpaint"):
204
+ with gr.Row():
205
+ image_control = gr.Image(label="Image ControlNet / Inpaint / Img2img", type="filepath", height=384, sources=["upload", "clipboard", "webcam"], show_share_button=False)
206
+ #image_control = gr.ImageEditor(label="Image ControlNet / Inpaint / Img2img", type="filepath", sources=["upload", "clipboard", "webcam"], image_mode='RGB',
207
+ # show_share_button=False, show_fullscreen_button=False, layers=False, canvas_size=(384, 384), width=384, height=512,
208
+ # brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed", default_size=32), eraser=gr.Eraser(default_size="32"))
209
+ image_mask = gr.Image(label="Image Mask", type="filepath", height=384, sources=["upload", "clipboard"], show_share_button=False)
210
+ with gr.Row():
211
+ strength = gr.Slider(minimum=0.01, maximum=1.0, step=0.01, value=0.55, label="Strength",
212
+ info="This option adjusts the level of changes for img2img and inpainting.")
213
+ image_resolution = gr.Slider(minimum=64, maximum=2048, step=64, value=1024, label="Image Resolution",
214
+ info="The maximum proportional size of the generated image based on the uploaded image.")
215
+ with gr.Row():
216
+ controlnet_model = gr.Dropdown(label="ControlNet model", choices=DIFFUSERS_CONTROLNET_MODEL, value=DIFFUSERS_CONTROLNET_MODEL[0])
217
+ control_net_output_scaling = gr.Slider(minimum=0, maximum=5.0, step=0.1, value=1, label="ControlNet Output Scaling in UNet")
218
+ control_net_start_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, value=0, label="ControlNet Start Threshold (%)")
219
+ control_net_stop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="ControlNet Stop Threshold (%)")
220
+ with gr.Row():
221
+ preprocessor_name = gr.Dropdown(label="Preprocessor Name", choices=TASK_AND_PREPROCESSORS["canny"])
222
+ preprocess_resolution = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocessor Resolution")
223
+ low_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="'CANNY' low threshold")
224
+ high_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="'CANNY' high threshold")
225
+ with gr.Row():
226
+ value_threshold = gr.Slider(minimum=1, maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
227
+ distance_threshold = gr.Slider(minimum=1, maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
228
+ recolor_gamma_correction = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
229
+ tile_blur_sigma = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'TILE' blur sigma")
230
+
231
+ with gr.Tab("IP-Adapter"):
232
+ IP_MODELS = sorted(list(set(IP_ADAPTERS_SD + IP_ADAPTERS_SDXL)))
233
+ MODE_IP_OPTIONS = ["original", "style", "layout", "style+layout"]
234
+ with gr.Row():
235
+ with gr.Accordion("IP-Adapter 1", open=True, visible=True):
236
+ image_ip1 = gr.Image(label="IP Image", type="filepath", height=384, sources=["upload", "clipboard"], show_share_button=False)
237
+ mask_ip1 = gr.Image(label="IP Mask (optional)", type="filepath", height=384, sources=["upload", "clipboard"], show_share_button=False)
238
+ with gr.Row():
239
+ model_ip1 = gr.Dropdown(value="plus_face", label="Model", choices=IP_MODELS)
240
+ mode_ip1 = gr.Dropdown(value="original", label="Mode", choices=MODE_IP_OPTIONS)
241
+ scale_ip1 = gr.Slider(minimum=0., maximum=2., step=0.01, value=0.7, label="Scale")
242
+ with gr.Accordion("IP-Adapter 2", open=True, visible=True):
243
+ image_ip2 = gr.Image(label="IP Image", type="filepath", height=384, sources=["upload", "clipboard"], show_share_button=False)
244
+ mask_ip2 = gr.Image(label="IP Mask (optional)", type="filepath", height=384, sources=["upload", "clipboard"], show_share_button=False)
245
+ with gr.Row():
246
+ model_ip2 = gr.Dropdown(value="base", label="Model", choices=IP_MODELS)
247
+ mode_ip2 = gr.Dropdown(value="style", label="Mode", choices=MODE_IP_OPTIONS)
248
+ scale_ip2 = gr.Slider(minimum=0., maximum=2., step=0.01, value=0.7, label="Scale")
249
 
250
+ with gr.Tab("Inpaint Mask Maker"):
251
+ with gr.Row():
252
+ with gr.Column(scale=2):
253
+ image_base = gr.ImageEditor(sources=["upload", "clipboard", "webcam"],
254
+ brush=gr.Brush(default_size=32, color_mode="fixed", colors=["rgba(0, 0, 0, 1)", "rgba(0, 0, 0, 0.1)", "rgba(255, 255, 255, 0.1)"]),
255
+ eraser=gr.Eraser(default_size=32), show_share_button=False, show_fullscreen_button=False,
256
+ canvas_size=(384, 384), width=384, height=512)
257
+ invert_mask = gr.Checkbox(value=False, label="Invert mask")
258
+ cm_btn = gr.Button("Create mask")
259
+ with gr.Column(scale=1):
260
+ img_source = gr.Image(interactive=False, height=384, show_share_button=False)
261
+ img_result = gr.Image(label="Mask image", show_label=True, interactive=False, height=384, show_share_button=False)
262
+ cm_btn_send = gr.Button("Send to ControlNet / Img2img / Inpaint")
263
+ cm_btn_send_ip1 = gr.Button("Send to IP-Adapter 1")
264
+ cm_btn_send_ip2 = gr.Button("Send to IP-Adapter 2")
265
+ cm_btn.click(create_mask_now, [image_base, invert_mask], [img_source, img_result], queue=False, show_api=False)
266
+ def send_img(img_source, img_result):
267
+ return img_source, img_result
268
+ cm_btn_send.click(send_img, [img_source, img_result], [image_control, image_mask], queue=False, show_api=False)
269
+ cm_btn_send_ip1.click(send_img, [img_source, img_result], [image_ip1, mask_ip1], queue=False, show_api=False)
270
+ cm_btn_send_ip2.click(send_img, [img_source, img_result], [image_ip2, mask_ip2], queue=False, show_api=False)
271
+
272
+ with gr.Tab("Hires fix"):
273
+ with gr.Row():
274
+ upscaler_model_path = gr.Dropdown(label="Upscaler", choices=UPSCALER_KEYS, value=UPSCALER_KEYS[0])
275
+ upscaler_increases_size = gr.Slider(minimum=1.1, maximum=4., step=0.1, value=1.2, label="Upscale by")
276
+ esrgan_tile = gr.Slider(minimum=0, value=0, maximum=500, step=1, label="ESRGAN Tile")
277
+ esrgan_tile_overlap = gr.Slider(minimum=1, maximum=200, step=1, value=8, label="ESRGAN Tile Overlap")
278
+ with gr.Row():
279
+ hires_steps = gr.Slider(minimum=0, value=30, maximum=100, step=1, label="Hires Steps")
280
+ hires_denoising_strength = gr.Slider(minimum=0.1, maximum=1.0, step=0.01, value=0.55, label="Hires Denoising Strength")
281
+ hires_sampler = gr.Dropdown(label="Hires Sampler", choices=POST_PROCESSING_SAMPLER, value=POST_PROCESSING_SAMPLER[0])
282
+ hires_schedule_list = ["Use same schedule type"] + SCHEDULE_TYPE_OPTIONS
283
+ hires_schedule_type = gr.Dropdown(label="Hires Schedule type", choices=hires_schedule_list, value=hires_schedule_list[0])
284
+ hires_guidance_scale = gr.Slider(minimum=-1., maximum=30., step=0.5, value=-1., label="Hires CFG", info="If the value is -1, the main CFG will be used")
285
+ with gr.Row():
286
+ hires_prompt = gr.Textbox(label="Hires Prompt", placeholder="Main prompt will be used", lines=3)
287
+ hires_negative_prompt = gr.Textbox(label="Hires Negative Prompt", placeholder="Main negative prompt will be used", lines=3)
288
 
289
+ with gr.Tab("Detailfix"):
290
+ with gr.Row():
291
+ # Adetailer Inpaint Only
292
+ adetailer_inpaint_only = gr.Checkbox(label="Inpaint only", value=True)
293
+ # Adetailer Verbose
294
+ adetailer_verbose = gr.Checkbox(label="Verbose", value=False)
295
+ # Adetailer Sampler
296
+ adetailer_sampler = gr.Dropdown(label="Adetailer sampler:", choices=POST_PROCESSING_SAMPLER, value=POST_PROCESSING_SAMPLER[0])
297
+ with gr.Row():
298
+ with gr.Accordion("Detailfix A", open=True, visible=True):
299
+ # Adetailer A
300
+ adetailer_active_a = gr.Checkbox(label="Enable Adetailer A", value=False)
301
+ prompt_ad_a = gr.Textbox(label="Main prompt", placeholder="Main prompt will be used", lines=3)
302
+ negative_prompt_ad_a = gr.Textbox(label="Negative prompt", placeholder="Main negative prompt will be used", lines=3)
303
+ with gr.Row():
304
+ strength_ad_a = gr.Number(label="Strength:", value=0.35, step=0.01, minimum=0.01, maximum=1.0)
305
+ face_detector_ad_a = gr.Checkbox(label="Face detector", value=False)
306
+ person_detector_ad_a = gr.Checkbox(label="Person detector", value=True)
307
+ hand_detector_ad_a = gr.Checkbox(label="Hand detector", value=False)
308
+ with gr.Row():
309
+ mask_dilation_a = gr.Number(label="Mask dilation:", value=4, minimum=1)
310
+ mask_blur_a = gr.Number(label="Mask blur:", value=4, minimum=1)
311
+ mask_padding_a = gr.Number(label="Mask padding:", value=32, minimum=1)
312
+ with gr.Accordion("Detailfix B", open=True, visible=True):
313
+ # Adetailer B
314
+ adetailer_active_b = gr.Checkbox(label="Enable Adetailer B", value=False)
315
+ prompt_ad_b = gr.Textbox(label="Main prompt", placeholder="Main prompt will be used", lines=3)
316
+ negative_prompt_ad_b = gr.Textbox(label="Negative prompt", placeholder="Main negative prompt will be used", lines=3)
317
+ with gr.Row():
318
+ strength_ad_b = gr.Number(label="Strength:", value=0.35, step=0.01, minimum=0.01, maximum=1.0)
319
+ face_detector_ad_b = gr.Checkbox(label="Face detector", value=False)
320
+ person_detector_ad_b = gr.Checkbox(label="Person detector", value=True)
321
+ hand_detector_ad_b = gr.Checkbox(label="Hand detector", value=False)
322
+ with gr.Row():
323
+ mask_dilation_b = gr.Number(label="Mask dilation:", value=4, minimum=1)
324
+ mask_blur_b = gr.Number(label="Mask blur:", value=4, minimum=1)
325
+ mask_padding_b = gr.Number(label="Mask padding:", value=32, minimum=1)
326
+
327
+ with gr.Tab("Textual inversion"):
328
+ active_textual_inversion = gr.Checkbox(value=False, label="Active Textual Inversion in prompt")
329
+ use_textual_inversion = gr.CheckboxGroup(choices=get_ti_choices(model_name.value) if active_textual_inversion.value else [], value=None, label="Use Textual Inversion in prompt")
330
+
331
+ with gr.Tab("Translation Settings"):
332
  chatbot = gr.Chatbot(render_markdown=False, visible=False) # component for auto-translation
333
  chat_model = gr.Dropdown(choices=get_dolphin_models(), value=get_dolphin_models()[0][1], allow_custom_value=True, label="Model")
334
  chat_model_info = gr.Markdown(value=get_dolphin_model_info(get_dolphin_models()[0][1]), label="Model info")
 
353
  cache_examples=False,
354
  )
355
 
356
+ model_name.change(update_task_options, [model_name, task], [task], queue=False, show_api=False)
357
+ task.change(change_preprocessor_choices, [task], [preprocessor_name], queue=False, show_api=False)
358
+ active_textual_inversion.change(update_textual_inversion, [active_textual_inversion, model_name], [use_textual_inversion], queue=False, show_api=False)
359
+ model_name.change(update_textual_inversion, [active_textual_inversion, model_name], [use_textual_inversion], queue=False, show_api=False)
360
+ use_textual_inversion.change(set_textual_inversion_prompt, [use_textual_inversion, prompt, negative_prompt, prompt_syntax], [prompt, negative_prompt])
361
+
362
  gr.on( #lambda x: None, inputs=None, outputs=result).then(
363
  triggers=[run_button.click, prompt.submit],
364
  fn=infer,
365
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
366
  guidance_scale, num_inference_steps, model_name,
367
  lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt,
368
+ lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt, task, prompt_syntax,
369
+ sampler, vae_model, schedule_type, schedule_prediction_type,
370
+ clip_skip, pag_scale, free_u, guidance_rescale,
371
+ image_control, image_mask, strength, image_resolution,
372
+ controlnet_model, control_net_output_scaling, control_net_start_threshold, control_net_stop_threshold,
373
+ preprocessor_name, preprocess_resolution, low_threshold, high_threshold,
374
+ value_threshold, distance_threshold, recolor_gamma_correction, tile_blur_sigma,
375
+ image_ip1, mask_ip1, model_ip1, mode_ip1, scale_ip1,
376
+ image_ip2, mask_ip2, model_ip2, mode_ip2, scale_ip2,
377
+ upscaler_model_path, upscaler_increases_size, esrgan_tile, esrgan_tile_overlap, hires_steps, hires_denoising_strength,
378
+ hires_sampler, hires_schedule_type, hires_guidance_scale, hires_prompt, hires_negative_prompt,
379
+ adetailer_inpaint_only, adetailer_verbose, adetailer_sampler, adetailer_active_a,
380
+ prompt_ad_a, negative_prompt_ad_a, strength_ad_a, face_detector_ad_a, person_detector_ad_a, hand_detector_ad_a,
381
+ mask_dilation_a, mask_blur_a, mask_padding_a, adetailer_active_b, prompt_ad_b, negative_prompt_ad_b, strength_ad_b,
382
+ face_detector_ad_b, person_detector_ad_b, hand_detector_ad_b, mask_dilation_b, mask_blur_b, mask_padding_b,
383
+ active_textual_inversion, gpu_duration, auto_trans, recom_prompt],
384
  outputs=[result],
385
  queue=True,
386
  show_progress="full",
 
393
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
394
  guidance_scale, num_inference_steps, model_name,
395
  lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt,
396
+ lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt, task, prompt_syntax,
397
+ sampler, vae_model, schedule_type, schedule_prediction_type,
398
+ clip_skip, pag_scale, free_u, guidance_rescale,
399
+ image_control, image_mask, strength, image_resolution,
400
+ controlnet_model, control_net_output_scaling, control_net_start_threshold, control_net_stop_threshold,
401
+ preprocessor_name, preprocess_resolution, low_threshold, high_threshold,
402
+ value_threshold, distance_threshold, recolor_gamma_correction, tile_blur_sigma,
403
+ image_ip1, mask_ip1, model_ip1, mode_ip1, scale_ip1,
404
+ image_ip2, mask_ip2, model_ip2, mode_ip2, scale_ip2,
405
+ upscaler_model_path, upscaler_increases_size, esrgan_tile, esrgan_tile_overlap, hires_steps, hires_denoising_strength,
406
+ hires_sampler, hires_schedule_type, hires_guidance_scale, hires_prompt, hires_negative_prompt,
407
+ adetailer_inpaint_only, adetailer_verbose, adetailer_sampler, adetailer_active_a,
408
+ prompt_ad_a, negative_prompt_ad_a, strength_ad_a, face_detector_ad_a, person_detector_ad_a, hand_detector_ad_a,
409
+ mask_dilation_a, mask_blur_a, mask_padding_a, adetailer_active_b, prompt_ad_b, negative_prompt_ad_b, strength_ad_b,
410
+ face_detector_ad_b, person_detector_ad_b, hand_detector_ad_b, mask_dilation_b, mask_blur_b, mask_padding_b,
411
+ active_textual_inversion, gpu_duration, auto_trans, recom_prompt],
412
  outputs=[result],
413
  queue=False,
414
  show_api=True,
 
431
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
432
  guidance_scale, num_inference_steps, model_name,
433
  lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt,
434
+ lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt, task, prompt_syntax,
435
+ sampler, vae_model, schedule_type, schedule_prediction_type,
436
+ clip_skip, pag_scale, free_u, guidance_rescale,
437
+ image_control, image_mask, strength, image_resolution,
438
+ controlnet_model, control_net_output_scaling, control_net_start_threshold, control_net_stop_threshold,
439
+ preprocessor_name, preprocess_resolution, low_threshold, high_threshold,
440
+ value_threshold, distance_threshold, recolor_gamma_correction, tile_blur_sigma,
441
+ image_ip1, mask_ip1, model_ip1, mode_ip1, scale_ip1,
442
+ image_ip2, mask_ip2, model_ip2, mode_ip2, scale_ip2,
443
+ upscaler_model_path, upscaler_increases_size, esrgan_tile, esrgan_tile_overlap, hires_steps, hires_denoising_strength,
444
+ hires_sampler, hires_schedule_type, hires_guidance_scale, hires_prompt, hires_negative_prompt,
445
+ adetailer_inpaint_only, adetailer_verbose, adetailer_sampler, adetailer_active_a,
446
+ prompt_ad_a, negative_prompt_ad_a, strength_ad_a, face_detector_ad_a, person_detector_ad_a, hand_detector_ad_a,
447
+ mask_dilation_a, mask_blur_a, mask_padding_a, adetailer_active_b, prompt_ad_b, negative_prompt_ad_b, strength_ad_b,
448
+ face_detector_ad_b, person_detector_ad_b, hand_detector_ad_b, mask_dilation_b, mask_blur_b, mask_padding_b,
449
+ active_textual_inversion, gpu_duration, auto_trans, recom_prompt],
450
  outputs=[result],
451
  queue=True,
452
  show_progress="full",
 
458
 
459
  gr.on(
460
  triggers=[lora1.change, lora1_wt.change, lora2.change, lora2_wt.change, lora3.change, lora3_wt.change,
461
+ lora4.change, lora4_wt.change, lora5.change, lora5_wt.change, lora6.change, lora6_wt.change, lora7.change, lora7_wt.change, prompt_syntax.change],
462
  fn=update_loras,
463
  inputs=[prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt],
464
  outputs=[prompt, lora1, lora1_wt, lora1_info, lora1_copy, lora1_md,
 
606
  ).success(
607
  insert_recom_prompt, [output_text_pony, dummy_np, recom_pony], [output_text_pony, dummy_np], queue=False, show_api=False,
608
  ).success(lambda: (gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True)),
609
+ None, [copy_btn, copy_btn_pony, copy_prompt_btn, copy_prompt_btn_pony], queue=False, show_api=False)
610
  copy_btn.click(gradio_copy_text, [output_text], js=COPY_ACTION_JS, show_api=False)
611
  copy_btn_pony.click(gradio_copy_text, [output_text_pony], js=COPY_ACTION_JS, show_api=False)
612
  copy_prompt_btn.click(gradio_copy_prompt, inputs=[output_text], outputs=[prompt], show_api=False)
dc.py CHANGED
@@ -119,8 +119,8 @@ vae_model_list = get_model_list(DIRECTORY_VAES)
119
  vae_model_list.insert(0, "BakedVAE")
120
  vae_model_list.insert(0, "None")
121
 
122
- #download_private_repo(HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO, DIRECTORY_EMBEDS_SDXL, False)
123
- #download_private_repo(HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO, DIRECTORY_EMBEDS_POSITIVE_SDXL, False)
124
  embed_sdxl_list = get_model_list(DIRECTORY_EMBEDS_SDXL) + get_model_list(DIRECTORY_EMBEDS_POSITIVE_SDXL)
125
 
126
  def get_embed_list(pipeline_name):
@@ -148,7 +148,7 @@ class GuiSD:
148
  self.last_load = datetime.now()
149
  self.inventory = []
150
 
151
- def update_storage_models(self, storage_floor_gb=32, required_inventory_for_purge=3):
152
  while get_used_storage_gb() > storage_floor_gb:
153
  if len(self.inventory) < required_inventory_for_purge:
154
  break
@@ -741,8 +741,8 @@ import random
741
  import json
742
  import shutil
743
  from tagger.tagger import insert_model_recom_prompt
744
- from modutils import (safe_float, escape_lora_basename, to_lora_key, to_lora_path, valid_model_name,
745
- get_local_model_list, get_private_lora_model_lists, get_valid_lora_name, get_state, set_state,
746
  get_valid_lora_path, get_valid_lora_wt, get_lora_info, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL,
747
  normalize_prompt_list, get_civitai_info, search_lora_on_civitai, translate_to_en, get_t2i_model_info, get_civitai_tag, save_image_history,
748
  get_all_lora_list, get_all_lora_tupled_list, update_lora_dict, download_lora, copy_lora, download_my_lora, set_prompt_loras,
@@ -753,45 +753,26 @@ from modutils import (safe_float, escape_lora_basename, to_lora_key, to_lora_pat
753
  def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
754
  model_name=load_diffusers_format_model[0], lora1=None, lora1_wt=1.0, lora2=None, lora2_wt=1.0,
755
  lora3=None, lora3_wt=1.0, lora4=None, lora4_wt=1.0, lora5=None, lora5_wt=1.0, lora6=None, lora6_wt=1.0, lora7=None, lora7_wt=1.0,
756
- sampler="Euler", vae=None, translate=False, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
757
- clip_skip=True, pag_scale=0.0, free_u=False, guidance_rescale=0., gpu_duration=59, recom_prompt=True, progress=gr.Progress(track_tqdm=True)):
 
 
 
 
 
 
 
 
 
 
 
 
758
  MAX_SEED = np.iinfo(np.int32).max
759
 
760
- task = TASK_MODEL_LIST[0]
761
- image_control = None
762
- preprocessor_name = "Canny"
763
- preprocess_resolution = 512
764
- image_resolution = 1024
765
  style_prompt = None
766
  style_json = None
767
- image_mask = None
768
- strength = 0.35
769
- low_threshold = 100
770
- high_threshold = 200
771
- value_threshold = 0.1
772
- distance_threshold = 0.1
773
- recolor_gamma_correction = 1.
774
- tile_blur_sigma = 9
775
- control_net_output_scaling = 1.0
776
- control_net_start_threshold = 0.
777
- control_net_stop_threshold = 1.
778
- active_textual_inversion = False
779
- prompt_syntax = "Classic"
780
- upscaler_model_path = None # UPSCALER_KEYS[0]
781
- upscaler_increases_size = 1.0 # 1.2
782
- esrgan_tile = 5
783
- esrgan_tile_overlap = 8
784
- hires_steps = 30
785
- hires_denoising_strength = 0.55
786
- hires_sampler = "Use same sampler" # POST_PROCESSING_SAMPLER[0]
787
- hires_prompt = ""
788
- hires_negative_prompt = ""
789
  hires_before_adetailer = False
790
  hires_after_adetailer = True
791
- hires_schedule_list = ["Use same schedule type"] + SCHEDULE_TYPE_OPTIONS
792
- hires_schedule_type = hires_schedule_list[0]
793
- hires_guidance_scale = -1
794
- controlnet_model = DIFFUSERS_CONTROLNET_MODEL[0]
795
  loop_generation = 1
796
  leave_progress_bar = True
797
  disable_progress_bar = False
@@ -808,40 +789,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
808
  adapter_conditioning_factor = 0.55
809
  xformers_memory_efficient_attention = False
810
  generator_in_cpu = False
811
- adetailer_inpaint_only = True
812
- adetailer_verbose = False
813
- adetailer_sampler = "Use same sampler"
814
- adetailer_active_a = False
815
- prompt_ad_a = ""
816
- negative_prompt_ad_a = ""
817
- strength_ad_a = 0.35
818
- face_detector_ad_a = True
819
- person_detector_ad_a = True
820
- hand_detector_ad_a = False
821
- mask_dilation_a = 4
822
- mask_blur_a = 4
823
- mask_padding_a = 32
824
- adetailer_active_b = False
825
- prompt_ad_b = ""
826
- negative_prompt_ad_b = ""
827
- strength_ad_b = 0.35
828
- face_detector_ad_b = True
829
- person_detector_ad_b = True
830
- hand_detector_ad_b = False
831
- mask_dilation_b = 4
832
- mask_blur_b = 4
833
- mask_padding_b = 32
834
  retain_task_cache = True
835
- image_ip1 = None
836
- mask_ip1 = None
837
- model_ip1 = "plus_face"
838
- mode_ip1 = "original"
839
- scale_ip1 = 0.7
840
- image_ip2 = None
841
- mask_ip2 = None
842
- model_ip2 = "base"
843
- mode_ip2 = "style"
844
- scale_ip2 = 0.7
845
  load_lora_cpu = False
846
  verbose_info = False
847
 
@@ -901,8 +849,8 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
901
  def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
902
  model_name=load_diffusers_format_model[0], lora1=None, lora1_wt=1.0, lora2=None, lora2_wt=1.0,
903
  lora3=None, lora3_wt=1.0, lora4=None, lora4_wt=1.0, lora5=None, lora5_wt=1.0, lora6=None, lora6_wt=1.0, lora7=None, lora7_wt=1.0,
904
- sampler="Euler", vae=None, translate=False, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
905
- clip_skip=True, pag_scale=0.0, free_u=False, guidance_rescale=0., gpu_duration=59, recom_prompt=True, progress=gr.Progress(track_tqdm=True)):
906
  return gr.update()
907
 
908
 
@@ -922,6 +870,32 @@ def get_vaes():
922
  return vae_model_list
923
 
924
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
925
  cached_diffusers_model_tupled_list = get_tupled_model_list(load_diffusers_format_model)
926
  def get_diffusers_model_list(state: dict = {}):
927
  show_diffusers_model_list_detail = get_state(state, "show_diffusers_model_list_detail")
 
119
  vae_model_list.insert(0, "BakedVAE")
120
  vae_model_list.insert(0, "None")
121
 
122
+ download_private_repo(HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO, DIRECTORY_EMBEDS_SDXL, False)
123
+ download_private_repo(HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO, DIRECTORY_EMBEDS_POSITIVE_SDXL, False)
124
  embed_sdxl_list = get_model_list(DIRECTORY_EMBEDS_SDXL) + get_model_list(DIRECTORY_EMBEDS_POSITIVE_SDXL)
125
 
126
  def get_embed_list(pipeline_name):
 
148
  self.last_load = datetime.now()
149
  self.inventory = []
150
 
151
+ def update_storage_models(self, storage_floor_gb=24, required_inventory_for_purge=3):
152
  while get_used_storage_gb() > storage_floor_gb:
153
  if len(self.inventory) < required_inventory_for_purge:
154
  break
 
741
  import json
742
  import shutil
743
  from tagger.tagger import insert_model_recom_prompt
744
+ from modutils import (safe_float, escape_lora_basename, to_lora_key, to_lora_path, valid_model_name, set_textual_inversion_prompt,
745
+ get_local_model_list, get_model_pipeline, get_private_lora_model_lists, get_valid_lora_name, get_state, set_state,
746
  get_valid_lora_path, get_valid_lora_wt, get_lora_info, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL,
747
  normalize_prompt_list, get_civitai_info, search_lora_on_civitai, translate_to_en, get_t2i_model_info, get_civitai_tag, save_image_history,
748
  get_all_lora_list, get_all_lora_tupled_list, update_lora_dict, download_lora, copy_lora, download_my_lora, set_prompt_loras,
 
753
  def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
754
  model_name=load_diffusers_format_model[0], lora1=None, lora1_wt=1.0, lora2=None, lora2_wt=1.0,
755
  lora3=None, lora3_wt=1.0, lora4=None, lora4_wt=1.0, lora5=None, lora5_wt=1.0, lora6=None, lora6_wt=1.0, lora7=None, lora7_wt=1.0,
756
+ task=TASK_MODEL_LIST[0], prompt_syntax="Classic", sampler="Euler", vae=None, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
757
+ clip_skip=True, pag_scale=0.0, free_u=False, guidance_rescale=0., image_control=None, image_mask=None, strength=0.35, image_resolution=1024,
758
+ controlnet_model=DIFFUSERS_CONTROLNET_MODEL[0], control_net_output_scaling=1.0, control_net_start_threshold=0., control_net_stop_threshold=1.,
759
+ preprocessor_name="Canny", preprocess_resolution=512, low_threshold=100, high_threshold=200,
760
+ value_threshold=0.1, distance_threshold=0.1, recolor_gamma_correction=1., tile_blur_sigma=9,
761
+ image_ip1=None, mask_ip1=None, model_ip1="plus_face", mode_ip1="original", scale_ip1=0.7,
762
+ image_ip2=None, mask_ip2=None, model_ip2="base", mode_ip2="style", scale_ip2=0.7,
763
+ upscaler_model_path=None, upscaler_increases_size=1.0, esrgan_tile=5, esrgan_tile_overlap=8, hires_steps=30, hires_denoising_strength=0.55,
764
+ hires_sampler="Use same sampler", hires_schedule_type="Use same schedule type", hires_guidance_scale=-1, hires_prompt="", hires_negative_prompt="",
765
+ adetailer_inpaint_only=True, adetailer_verbose=False, adetailer_sampler="Use same sampler", adetailer_active_a=False,
766
+ prompt_ad_a="", negative_prompt_ad_a="", strength_ad_a=0.35, face_detector_ad_a=True, person_detector_ad_a=True, hand_detector_ad_a=False,
767
+ mask_dilation_a=4, mask_blur_a=4, mask_padding_a=32, adetailer_active_b=False, prompt_ad_b="", negative_prompt_ad_b="", strength_ad_b=0.35,
768
+ face_detector_ad_b=True, person_detector_ad_b=True, hand_detector_ad_b=False, mask_dilation_b=4, mask_blur_b=4, mask_padding_b=32,
769
+ active_textual_inversion=False, gpu_duration=59, translate=False, recom_prompt=True, progress=gr.Progress(track_tqdm=True)):
770
  MAX_SEED = np.iinfo(np.int32).max
771
 
 
 
 
 
 
772
  style_prompt = None
773
  style_json = None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
774
  hires_before_adetailer = False
775
  hires_after_adetailer = True
 
 
 
 
776
  loop_generation = 1
777
  leave_progress_bar = True
778
  disable_progress_bar = False
 
789
  adapter_conditioning_factor = 0.55
790
  xformers_memory_efficient_attention = False
791
  generator_in_cpu = False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
792
  retain_task_cache = True
 
 
 
 
 
 
 
 
 
 
793
  load_lora_cpu = False
794
  verbose_info = False
795
 
 
849
  def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
850
  model_name=load_diffusers_format_model[0], lora1=None, lora1_wt=1.0, lora2=None, lora2_wt=1.0,
851
  lora3=None, lora3_wt=1.0, lora4=None, lora4_wt=1.0, lora5=None, lora5_wt=1.0, lora6=None, lora6_wt=1.0, lora7=None, lora7_wt=1.0,
852
+ task=TASK_MODEL_LIST[0], prompt_syntax="Classic", sampler="Euler", vae=None, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
853
+ clip_skip=True, pag_scale=0.0, free_u=False, guidance_rescale=0., gpu_duration=59, translate=False, recom_prompt=True, progress=gr.Progress(track_tqdm=True)):
854
  return gr.update()
855
 
856
 
 
870
  return vae_model_list
871
 
872
 
873
+ def update_task_options(model_name, task_name):
874
+ new_choices = MODEL_TYPE_TASK[get_model_type(valid_model_name(model_name))]
875
+
876
+ if task_name not in new_choices:
877
+ task_name = "txt2img"
878
+
879
+ return gr.update(value=task_name, choices=new_choices)
880
+
881
+
882
+ def change_preprocessor_choices(task):
883
+ task = TASK_STABLEPY[task]
884
+ if task in TASK_AND_PREPROCESSORS.keys():
885
+ choices_task = TASK_AND_PREPROCESSORS[task]
886
+ else:
887
+ choices_task = TASK_AND_PREPROCESSORS["canny"]
888
+ return gr.update(choices=choices_task, value=choices_task[0])
889
+
890
+
891
+ def get_ti_choices(model_name: str):
892
+ return get_embed_list(get_model_pipeline(valid_model_name(model_name)))
893
+
894
+
895
+ def update_textual_inversion(active_textual_inversion: bool, model_name: str):
896
+ return gr.update(choices=get_ti_choices(model_name) if active_textual_inversion else [])
897
+
898
+
899
  cached_diffusers_model_tupled_list = get_tupled_model_list(load_diffusers_format_model)
900
  def get_diffusers_model_list(state: dict = {}):
901
  show_diffusers_model_list_detail = get_state(state, "show_diffusers_model_list_detail")
env.py CHANGED
@@ -1,4 +1,5 @@
1
  import os
 
2
 
3
  CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
4
  HF_TOKEN = os.environ.get("HF_TOKEN")
@@ -38,98 +39,6 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
38
  'votepurchase/RealVisXL_V4.0',
39
  'votepurchase/juggernautXL_hyper_8step_sfw',
40
  'votepurchase/ponyRealism_v21MainVAE',
41
- 'stabilityai/stable-diffusion-xl-base-1.0',
42
- 'black-forest-labs/FLUX.1-dev',
43
- 'John6666/blue-pencil-flux1-v021-fp8-flux',
44
- 'John6666/wai-ani-flux-v10forfp8-fp8-flux',
45
- 'John6666/xe-anime-flux-v04-fp8-flux',
46
- 'John6666/lyh-anime-flux-v2a1-fp8-flux',
47
- 'John6666/carnival-unchained-v10-fp8-flux',
48
- 'Freepik/flux.1-lite-8B-alpha',
49
- 'ostris/OpenFLUX.1',
50
- 'John6666/noobai-xl-nai-xl-epsilonpred10version-sdxl',
51
- 'John6666/noobai-xl-nai-xl-epsilonpred075version-sdxl',
52
- 'John6666/noobai-xl-nai-xl-epsilonpred05version-sdxl',
53
- 'John6666/noobai-cyberfix-v10-sdxl',
54
- 'John6666/noobaiiter-xl-vpred-v075-sdxl',
55
- 'John6666/complicated-noobai-merge-vprediction-sdxl',
56
- 'John6666/noobai-fusion2-vpred-itercomp-v1-sdxl',
57
- 'John6666/noobai-xl-nai-xl-vpredtestversion-sdxl',
58
- 'John6666/chadmix-noobai075-illustrious01-v10-sdxl',
59
- 'OnomaAIResearch/Illustrious-xl-early-release-v0',
60
- 'John6666/obsession-illustriousxl-v21-sdxl',
61
- 'eienmojiki/Anything-XL',
62
- 'eienmojiki/Starry-XL-v5.2',
63
- 'John6666/meinaxl-v2-sdxl',
64
- 'John6666/epicrealism-xl-v10kiss2-sdxl',
65
- 'John6666/epicrealism-xl-v8kiss-sdxl',
66
- 'misri/zavychromaxl_v80',
67
- 'SG161222/RealVisXL_V4.0',
68
- 'SG161222/RealVisXL_V5.0',
69
- 'misri/newrealityxlAllInOne_Newreality40',
70
- 'gsdf/CounterfeitXL',
71
- 'John6666/silvermoon-mix-01xl-v11-sdxl',
72
- 'WhiteAiZ/autismmixSDXL_autismmixConfetti_diffusers',
73
- 'kitty7779/ponyDiffusionV6XL',
74
- 'GraydientPlatformAPI/aniverse-pony',
75
- 'John6666/ras-real-anime-screencap-v1-sdxl',
76
- 'John6666/duchaiten-pony-xl-no-score-v60-sdxl',
77
- 'John6666/mistoon-anime-ponyalpha-sdxl',
78
- 'John6666/3x3x3mixxl-v2-sdxl',
79
- 'John6666/3x3x3mixxl-3dv01-sdxl',
80
- 'John6666/ebara-mfcg-pony-mix-v12-sdxl',
81
- 'John6666/t-ponynai3-v51-sdxl',
82
- 'John6666/t-ponynai3-v65-sdxl',
83
- 'John6666/prefect-pony-xl-v3-sdxl',
84
- 'John6666/mala-anime-mix-nsfw-pony-xl-v5-sdxl',
85
- 'John6666/wai-real-mix-v11-sdxl',
86
- 'John6666/wai-c-v6-sdxl',
87
- 'John6666/iniverse-mix-xl-sfwnsfw-pony-guofeng-v43-sdxl',
88
- 'John6666/sifw-annihilation-xl-v2-sdxl',
89
- 'John6666/photo-realistic-pony-v5-sdxl',
90
- 'John6666/pony-realism-v21main-sdxl',
91
- 'John6666/pony-realism-v22main-sdxl',
92
- 'John6666/cyberrealistic-pony-v63-sdxl',
93
- 'John6666/cyberrealistic-pony-v64-sdxl',
94
- 'John6666/cyberrealistic-pony-v65-sdxl',
95
- 'GraydientPlatformAPI/realcartoon-pony-diffusion',
96
- 'John6666/nova-anime-xl-pony-v5-sdxl',
97
- 'John6666/autismmix-sdxl-autismmix-pony-sdxl',
98
- 'John6666/aimz-dream-real-pony-mix-v3-sdxl',
99
- 'John6666/duchaiten-pony-real-v11fix-sdxl',
100
- 'John6666/duchaiten-pony-real-v20-sdxl',
101
- 'John6666/duchaiten-pony-xl-no-score-v70-sdxl',
102
- 'Eugeoter/artiwaifu-diffusion-2.0',
103
- 'comin/IterComp',
104
- 'KBlueLeaf/Kohaku-XL-Zeta',
105
- 'cagliostrolab/animagine-xl-3.1',
106
- 'yodayo-ai/kivotos-xl-2.0',
107
- 'yodayo-ai/holodayo-xl-2.1',
108
- 'yodayo-ai/clandestine-xl-1.0',
109
- 'digiplay/majicMIX_sombre_v2',
110
- 'digiplay/majicMIX_realistic_v6',
111
- 'digiplay/majicMIX_realistic_v7',
112
- 'digiplay/DreamShaper_8',
113
- 'digiplay/BeautifulArt_v1',
114
- 'digiplay/DarkSushi2.5D_v1',
115
- 'digiplay/darkphoenix3D_v1.1',
116
- 'digiplay/BeenYouLiteL11_diffusers',
117
- 'Yntec/RevAnimatedV2Rebirth',
118
- 'youknownothing/cyberrealistic_v50',
119
- 'youknownothing/deliberate-v6',
120
- 'GraydientPlatformAPI/deliberate-cyber3',
121
- 'GraydientPlatformAPI/picx-real',
122
- 'GraydientPlatformAPI/perfectworld6',
123
- 'emilianJR/epiCRealism',
124
- 'votepurchase/counterfeitV30_v30',
125
- 'votepurchase/ChilloutMix',
126
- 'Meina/MeinaMix_V11',
127
- 'Meina/MeinaUnreal_V5',
128
- 'Meina/MeinaPastel_V7',
129
- 'GraydientPlatformAPI/realcartoon3d-17',
130
- 'GraydientPlatformAPI/realcartoon-pixar11',
131
- 'GraydientPlatformAPI/realcartoon-real17',
132
- 'nitrosocke/Ghibli-Diffusion',
133
  'KBlueLeaf/Kohaku-XL-Epsilon-rev2',
134
  'KBlueLeaf/Kohaku-XL-Epsilon-rev3',
135
  'KBlueLeaf/Kohaku-XL-Zeta',
@@ -138,6 +47,7 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
138
  'Eugeoter/artiwaifu-diffusion-2.0',
139
  'comin/IterComp',
140
  'OnomaAIResearch/Illustrious-xl-early-release-v0',
 
141
  'Raelina/Rae-Diffusion-XL-V2',
142
  'Raelina/Raemu-XL-V4',
143
  'Raelina/Raehoshi-illust-XL',
@@ -150,17 +60,18 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
150
  "multimodalart/FLUX.1-dev2pro-full",
151
  "Raelina/Raemu-Flux",
152
  ]
 
153
 
154
  DIFFUSERS_FORMAT_LORAS = [
155
  "nerijs/animation2k-flux",
156
  "XLabs-AI/flux-RealismLora",
 
157
  ]
158
 
159
  # List all Models for specified user
160
  HF_MODEL_USER_LIKES = ["votepurchase"] # sorted by number of likes
161
  HF_MODEL_USER_EX = ["John6666"] # sorted by a special rule
162
 
163
-
164
  # - **Download Models**
165
  DOWNLOAD_MODEL_LIST = [
166
  ]
 
1
  import os
2
+ from constants import LOAD_DIFFUSERS_FORMAT_MODEL as LOAD_DIFFUSERS_FORMAT_MODEL_DC
3
 
4
  CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
5
  HF_TOKEN = os.environ.get("HF_TOKEN")
 
39
  'votepurchase/RealVisXL_V4.0',
40
  'votepurchase/juggernautXL_hyper_8step_sfw',
41
  'votepurchase/ponyRealism_v21MainVAE',
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
  'KBlueLeaf/Kohaku-XL-Epsilon-rev2',
43
  'KBlueLeaf/Kohaku-XL-Epsilon-rev3',
44
  'KBlueLeaf/Kohaku-XL-Zeta',
 
47
  'Eugeoter/artiwaifu-diffusion-2.0',
48
  'comin/IterComp',
49
  'OnomaAIResearch/Illustrious-xl-early-release-v0',
50
+ 'Laxhar/noobai-XL-1.0',
51
  'Raelina/Rae-Diffusion-XL-V2',
52
  'Raelina/Raemu-XL-V4',
53
  'Raelina/Raehoshi-illust-XL',
 
60
  "multimodalart/FLUX.1-dev2pro-full",
61
  "Raelina/Raemu-Flux",
62
  ]
63
+ LOAD_DIFFUSERS_FORMAT_MODEL = LOAD_DIFFUSERS_FORMAT_MODEL + LOAD_DIFFUSERS_FORMAT_MODEL_DC
64
 
65
  DIFFUSERS_FORMAT_LORAS = [
66
  "nerijs/animation2k-flux",
67
  "XLabs-AI/flux-RealismLora",
68
+ "Shakker-Labs/FLUX.1-dev-LoRA-Logo-Design",
69
  ]
70
 
71
  # List all Models for specified user
72
  HF_MODEL_USER_LIKES = ["votepurchase"] # sorted by number of likes
73
  HF_MODEL_USER_EX = ["John6666"] # sorted by a special rule
74
 
 
75
  # - **Download Models**
76
  DOWNLOAD_MODEL_LIST = [
77
  ]