John6666 committed on
Commit d6b9053
1 Parent(s): 14fe195

Upload 4 files

Files changed (3)
  1. app.py +8 -5
  2. dc.py +9 -12
  3. modutils.py +21 -21
app.py CHANGED
@@ -7,7 +7,8 @@ from dc import (infer, _infer, pass_result, get_diffusers_model_list, get_sample
     get_vaes, enable_model_recom_prompt, enable_diffusers_model_detail, extract_exif_data, esrgan_upscale, UPSCALER_KEYS,
     preset_quality, preset_styles, process_style_prompt, get_all_lora_tupled_list, update_loras, apply_lora_prompt,
     download_my_lora, search_civitai_lora, update_civitai_selection, select_civitai_lora, search_civitai_lora_json,
-    get_t2i_model_info, get_civitai_tag, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL)
+    get_t2i_model_info, get_civitai_tag, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL,
+    SCHEDULE_TYPE_OPTIONS, SCHEDULE_PREDICTION_TYPE_OPTIONS)
 # Translator
 from llmdolphin import (dolphin_respond_auto, dolphin_parse_simple,
     get_llm_formats, get_dolphin_model_format, get_dolphin_models,
@@ -92,6 +93,8 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
 
         with gr.Row():
             sampler = gr.Dropdown(label="Sampler", choices=get_samplers(), value="Euler")
+            schedule_type = gr.Dropdown(label="Schedule type", choices=SCHEDULE_TYPE_OPTIONS, value=SCHEDULE_TYPE_OPTIONS[0])
+            schedule_prediction_type = gr.Dropdown(label="Discrete Sampling Type", choices=SCHEDULE_PREDICTION_TYPE_OPTIONS, value=SCHEDULE_PREDICTION_TYPE_OPTIONS[0])
             vae_model = gr.Dropdown(label="VAE Model", choices=get_vaes(), value=get_vaes()[0])
 
     with gr.Accordion("LoRA", open=True, visible=True):
@@ -161,7 +164,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
         with gr.Accordion("Select from Gallery", open=False):
             lora_search_civitai_gallery = gr.Gallery([], label="Results", allow_preview=False, columns=5, show_share_button=False, interactive=False)
         lora_search_civitai_result = gr.Dropdown(label="Search Results", choices=[("", "")], value="", allow_custom_value=True, visible=False)
-        lora_download_url = gr.Textbox(label="LoRA URL", placeholder="https://civitai.com/api/download/models/28907", info="It has to be .safetensors files, and you can also download them from Hugging Face.", lines=1)
+        lora_download_url = gr.Textbox(label="LoRA's download URL", placeholder="https://civitai.com/api/download/models/28907", info="It has to be .safetensors files, and you can also download them from Hugging Face.", lines=1)
         lora_download = gr.Button("Get and set LoRA and apply to prompt")
 
     with gr.Row():
@@ -200,7 +203,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
         inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
            guidance_scale, num_inference_steps, model_name,
            lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
-           sampler, vae_model, auto_trans],
+           sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type],
         outputs=[result],
         queue=True,
         show_progress="full",
@@ -213,7 +216,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
         inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
            guidance_scale, num_inference_steps, model_name,
            lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
-           sampler, vae_model, auto_trans],
+           sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type],
         outputs=[result],
         queue=False,
         show_api=True,
@@ -236,7 +239,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
         inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
            guidance_scale, num_inference_steps, model_name,
            lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
-           sampler, vae_model],
+           sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type],
         outputs=[result],
         queue=True,
         show_progress="full",
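
For reference, a minimal, self-contained sketch (not part of the commit) of the wiring pattern the app.py change relies on: two extra gr.Dropdown components are appended to an event's inputs list, so the handler must accept them as additional positional arguments. The option lists and the toy infer handler below are illustrative assumptions, not the repository's code.

# sketch.py -- assumed option lists and a dummy handler, for illustration only
import gradio as gr

SCHEDULE_TYPE_OPTIONS = ["Automatic", "Karras"]              # assumed values
SCHEDULE_PREDICTION_TYPE_OPTIONS = ["Automatic", "Epsilon"]  # assumed values

def infer(prompt, sampler, schedule_type, schedule_prediction_type):
    # Echo the inputs so the wiring is visible in the UI.
    return f"{prompt} | {sampler} | {schedule_type} | {schedule_prediction_type}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    sampler = gr.Dropdown(label="Sampler", choices=["Euler", "Euler a"], value="Euler")
    schedule_type = gr.Dropdown(label="Schedule type", choices=SCHEDULE_TYPE_OPTIONS, value=SCHEDULE_TYPE_OPTIONS[0])
    schedule_prediction_type = gr.Dropdown(label="Discrete Sampling Type", choices=SCHEDULE_PREDICTION_TYPE_OPTIONS, value=SCHEDULE_PREDICTION_TYPE_OPTIONS[0])
    result = gr.Textbox(label="Result")
    run = gr.Button("Run")
    # The new dropdowns are appended to the existing inputs list, so the handler
    # receives them as two extra positional arguments.
    run.click(infer, inputs=[prompt, sampler, schedule_type, schedule_prediction_type], outputs=[result])

if __name__ == "__main__":
    demo.launch()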
dc.py CHANGED
@@ -690,29 +690,29 @@ sd_gen = GuiSD()
 
 from pathlib import Path
 from PIL import Image
-import random, json
+import PIL
+import numpy as np
+import random
+import json
+import shutil
 from modutils import (safe_float, escape_lora_basename, to_lora_key, to_lora_path,
     get_local_model_list, get_private_lora_model_lists, get_valid_lora_name,
     get_valid_lora_path, get_valid_lora_wt, get_lora_info, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL,
     normalize_prompt_list, get_civitai_info, search_lora_on_civitai, translate_to_en, get_t2i_model_info, get_civitai_tag, save_image_history)
 
 
-
 #@spaces.GPU
 def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
           model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
           lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
-          sampler = "Euler", vae = None, translate=True, progress=gr.Progress(track_tqdm=True)):
-    import PIL
-    import numpy as np
+          sampler = "Euler", vae = None, translate=True, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
+          progress=gr.Progress(track_tqdm=True)):
     MAX_SEED = np.iinfo(np.int32).max
 
     image_previews = True
     load_lora_cpu = False
     verbose_info = False
     gpu_duration = 59
-    schedule_type = SCHEDULE_TYPE_OPTIONS[0]
-    schedule_prediction_type = SCHEDULE_PREDICTION_TYPE_OPTIONS[0]
    filename_pattern = "model,seed"
 
    images: list[tuple[PIL.Image.Image, str | None]] = []
@@ -766,8 +766,6 @@ def __infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidan
           model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
           lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
           sampler = "Euler a", vae = None, translate=True, progress=gr.Progress(track_tqdm=True)):
-    import PIL
-    import numpy as np
     MAX_SEED = np.iinfo(np.int32).max
 
     load_lora_cpu = False
@@ -824,7 +822,8 @@ def __infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidan
 def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
           model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
           lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
-          sampler = "Euler a", vae = None, translate = True, progress=gr.Progress(track_tqdm=True)):
+          sampler = "Euler", vae = None, translate = True, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
+          progress=gr.Progress(track_tqdm=True)):
     return gr.update(visible=True)
 
 
@@ -868,7 +867,6 @@ def enable_diffusers_model_detail(is_enable: bool = False, model_name: str = "")
 
 
 def load_model_prompt_dict():
-    import json
     dict = {}
     try:
         with open('model_dict.json', encoding='utf-8') as f:
@@ -1005,7 +1003,6 @@ def download_lora(dl_urls: str):
 
 
 def copy_lora(path: str, new_path: str):
-    import shutil
     if path == new_path: return new_path
     cpath = Path(path)
     npath = Path(new_path)
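
A minimal sketch (assumptions only, not the repository's code) of the signature change in dc.py: the formerly hard-coded schedule_type / schedule_prediction_type locals become keyword arguments with the same defaults, so existing call sites keep working while the new UI dropdowns can override them.

# sketch: keyword defaults preserve old behaviour; option lists are assumed
SCHEDULE_TYPE_OPTIONS = ["Automatic", "Karras"]              # assumed values
SCHEDULE_PREDICTION_TYPE_OPTIONS = ["Automatic", "Epsilon"]  # assumed values

def infer(prompt, sampler="Euler",
          schedule_type=SCHEDULE_TYPE_OPTIONS[0],
          schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0]):
    # The defaults reproduce what the previous hard-coded locals did.
    return (prompt, sampler, schedule_type, schedule_prediction_type)

print(infer("a cat"))                          # old-style call still works
print(infer("a cat", "Euler", "Karras"))       # callers may now pass the new options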
modutils.py CHANGED
@@ -960,7 +960,7 @@ def move_file_lora(filepaths):
 
 CIVITAI_SORT = ["Highest Rated", "Most Downloaded", "Newest"]
 CIVITAI_PERIOD = ["AllTime", "Year", "Month", "Week", "Day"]
-CIVITAI_BASEMODEL = ["Pony", "SD 1.5", "SDXL 1.0", "Flux.1 D", "Flux.1 S"]
+CIVITAI_BASEMODEL = ["Pony", "Illustrious", "SDXL 1.0", "SD 1.5", "Flux.1 D", "Flux.1 S"]
 
 
 def get_civitai_info(path):
@@ -1320,10 +1320,10 @@ style_list = [
 
 
 optimization_list = {
-    "None": [28, 7., 'Euler a', False, 'None', 1.],
-    "Default": [28, 7., 'Euler a', False, 'None', 1.],
-    "SPO": [28, 7., 'Euler a', True, 'loras/spo_sdxl_10ep_4k-data_lora_diffusers.safetensors', 1.],
-    "DPO": [28, 7., 'Euler a', True, 'loras/sdxl-DPO-LoRA.safetensors', 1.],
+    "None": [28, 7., 'Euler', False, 'None', 1.],
+    "Default": [28, 7., 'Euler', False, 'None', 1.],
+    "SPO": [28, 7., 'Euler', True, 'loras/spo_sdxl_10ep_4k-data_lora_diffusers.safetensors', 1.],
+    "DPO": [28, 7., 'Euler', True, 'loras/sdxl-DPO-LoRA.safetensors', 1.],
     "DPO Turbo": [8, 2.5, 'LCM', True, 'loras/sd_xl_dpo_turbo_lora_v1-128dim.safetensors', 1.],
     "SDXL Turbo": [8, 2.5, 'LCM', True, 'loras/sd_xl_turbo_lora_v1.safetensors', 1.],
     "Hyper-SDXL 12step": [12, 5., 'TCD', True, 'loras/Hyper-SDXL-12steps-CFG-lora.safetensors', 1.],
@@ -1331,10 +1331,10 @@ optimization_list = {
     "Hyper-SDXL 4step": [4, 0, 'TCD', True, 'loras/Hyper-SDXL-4steps-lora.safetensors', 1.],
     "Hyper-SDXL 2step": [2, 0, 'TCD', True, 'loras/Hyper-SDXL-2steps-lora.safetensors', 1.],
     "Hyper-SDXL 1step": [1, 0, 'TCD', True, 'loras/Hyper-SDXL-1steps-lora.safetensors', 1.],
-    "PCM 16step": [16, 4., 'Euler a trailing', True, 'loras/pcm_sdxl_normalcfg_16step_converted.safetensors', 1.],
-    "PCM 8step": [8, 4., 'Euler a trailing', True, 'loras/pcm_sdxl_normalcfg_8step_converted.safetensors', 1.],
-    "PCM 4step": [4, 2., 'Euler a trailing', True, 'loras/pcm_sdxl_smallcfg_4step_converted.safetensors', 1.],
-    "PCM 2step": [2, 1., 'Euler a trailing', True, 'loras/pcm_sdxl_smallcfg_2step_converted.safetensors', 1.],
+    "PCM 16step": [16, 4., 'Euler trailing', True, 'loras/pcm_sdxl_normalcfg_16step_converted.safetensors', 1.],
+    "PCM 8step": [8, 4., 'Euler trailing', True, 'loras/pcm_sdxl_normalcfg_8step_converted.safetensors', 1.],
+    "PCM 4step": [4, 2., 'Euler trailing', True, 'loras/pcm_sdxl_smallcfg_4step_converted.safetensors', 1.],
+    "PCM 2step": [2, 1., 'Euler trailing', True, 'loras/pcm_sdxl_smallcfg_2step_converted.safetensors', 1.],
 }
 
 
@@ -1362,13 +1362,13 @@ def set_optimization(opt, steps_gui, cfg_gui, sampler_gui, clip_skip_gui, lora_g
 
 # [sampler_gui, steps_gui, cfg_gui, clip_skip_gui, img_width_gui, img_height_gui, optimization_gui]
 preset_sampler_setting = {
-    "None": ["Euler a", 28, 7., True, 1024, 1024, "None"],
+    "None": ["Euler", 28, 7., True, 1024, 1024, "None"],
     "Anime 3:4 Fast": ["LCM", 8, 2.5, True, 896, 1152, "DPO Turbo"],
-    "Anime 3:4 Standard": ["Euler a", 28, 7., True, 896, 1152, "None"],
-    "Anime 3:4 Heavy": ["Euler a", 40, 7., True, 896, 1152, "None"],
+    "Anime 3:4 Standard": ["Euler", 28, 7., True, 896, 1152, "None"],
+    "Anime 3:4 Heavy": ["Euler", 40, 7., True, 896, 1152, "None"],
     "Anime 1:1 Fast": ["LCM", 8, 2.5, True, 1024, 1024, "DPO Turbo"],
-    "Anime 1:1 Standard": ["Euler a", 28, 7., True, 1024, 1024, "None"],
-    "Anime 1:1 Heavy": ["Euler a", 40, 7., True, 1024, 1024, "None"],
+    "Anime 1:1 Standard": ["Euler", 28, 7., True, 1024, 1024, "None"],
+    "Anime 1:1 Heavy": ["Euler", 40, 7., True, 1024, 1024, "None"],
     "Photo 3:4 Fast": ["LCM", 8, 2.5, False, 896, 1152, "DPO Turbo"],
     "Photo 3:4 Standard": ["DPM++ 2M Karras", 28, 7., False, 896, 1152, "None"],
     "Photo 3:4 Heavy": ["DPM++ 2M Karras", 40, 7., False, 896, 1152, "None"],
@@ -1380,9 +1380,9 @@ preset_sampler_setting = {
 
 def set_sampler_settings(sampler_setting):
     if not sampler_setting in list(preset_sampler_setting.keys()) or sampler_setting == "None":
-        return gr.update(value="Euler a"), gr.update(value=28), gr.update(value=7.), gr.update(value=True),\
+        return gr.update(value="Euler"), gr.update(value=28), gr.update(value=7.), gr.update(value=True),\
             gr.update(value=1024), gr.update(value=1024), gr.update(value="None")
-    v = preset_sampler_setting.get(sampler_setting, ["Euler a", 28, 7., True, 1024, 1024])
+    v = preset_sampler_setting.get(sampler_setting, ["Euler", 28, 7., True, 1024, 1024])
     # sampler, steps, cfg, clip_skip, width, height, optimization
     return gr.update(value=v[0]), gr.update(value=v[1]), gr.update(value=v[2]), gr.update(value=v[3]),\
         gr.update(value=v[4]), gr.update(value=v[5]), gr.update(value=v[6])
@@ -1573,7 +1573,7 @@ EXAMPLES_GUI = [
     7.5,
     True,
     -1,
-    "Euler a",
+    "Euler",
     1152,
     896,
     "votepurchase/animagine-xl-3.1",
@@ -1586,7 +1586,7 @@ EXAMPLES_GUI = [
     5.,
     True,
     -1,
-    "Euler a",
+    "Euler",
     1024,
     1024,
     "votepurchase/ponyDiffusionV6XL",
@@ -1599,7 +1599,7 @@ EXAMPLES_GUI = [
     7.0,
     True,
     -1,
-    "Euler a",
+    "Euler",
     1024,
     1024,
     "Raelina/Rae-Diffusion-XL-V2",
@@ -1612,7 +1612,7 @@ EXAMPLES_GUI = [
     7.0,
     True,
     -1,
-    "Euler a",
+    "Euler",
     1024,
     1024,
     "Raelina/Raemu-XL-V4",
@@ -1625,7 +1625,7 @@ EXAMPLES_GUI = [
     7.,
     True,
     -1,
-    "Euler a",
+    "Euler",
     1024,
     1024,
     "cagliostrolab/animagine-xl-3.1",