John6666 committed on
Commit 8d15848 · verified · 1 Parent(s): be7642d

Upload 6 files

Files changed (6)
  1. app.py +252 -167
  2. constants.py +77 -82
  3. image_processor.py +130 -0
  4. modutils.py +65 -21
  5. requirements.txt +1 -1
  6. utils.py +4 -0
app.py CHANGED
@@ -5,9 +5,9 @@ from stablepy import (
     SCHEDULE_TYPE_OPTIONS,
     SCHEDULE_PREDICTION_TYPE_OPTIONS,
     check_scheduler_compatibility,
+    TASK_AND_PREPROCESSORS,
 )
 from constants import (
-    PREPROCESSOR_CONTROLNET,
     TASK_STABLEPY,
     TASK_MODEL_LIST,
     UPSCALER_DICT_GUI,
@@ -17,6 +17,7 @@ from constants import (
     SDXL_TASK,
     MODEL_TYPE_TASK,
     POST_PROCESSING_SAMPLER,
+    DIFFUSERS_CONTROLNET_MODEL,

 )
 from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
@@ -42,15 +43,18 @@ from utils import (
     html_template_message,
     escape_html,
 )
+from image_processor import preprocessor_tab
 from datetime import datetime
 import gradio as gr
 import logging
 import diffusers
 import warnings
 from stablepy import logger
+from diffusers import FluxPipeline
 # import urllib.parse

 ImageFile.LOAD_TRUNCATED_IMAGES = True
+torch.backends.cuda.matmul.allow_tf32 = True
 # os.environ["PYTORCH_NO_CUDA_MEMORY_CACHING"] = "1"
 print(os.getenv("SPACES_ZERO_GPU"))

@@ -61,7 +65,7 @@ from modutils import (list_uniq, download_private_repo, get_model_id_list, get_t
     update_civitai_selection, get_civitai_tag, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL,
     set_textual_inversion_prompt, get_model_pipeline, change_interface_mode, get_t2i_model_info,
     get_tupled_model_list, save_gallery_images, save_gallery_history, set_optimization, set_sampler_settings,
-    set_quick_presets, process_style_prompt, optimization_list, save_images, download_things,
+    set_quick_presets, process_style_prompt, optimization_list, save_images, download_things, valid_model_name,
     preset_styles, preset_quality, preset_sampler_setting, translate_to_en, EXAMPLES_GUI, RESOURCES)
 from env import (HF_TOKEN, CIVITAI_API_KEY, HF_LORA_ESSENTIAL_PRIVATE_REPO, HF_VAE_PRIVATE_REPO,
     HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO, HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO,
@@ -116,21 +120,25 @@ def get_embed_list(pipeline_name):

 print('\033[33m🏁 Download and listing of valid models completed.\033[0m')

+flux_repo = "camenduru/FLUX.1-dev-diffusers"
+flux_pipe = FluxPipeline.from_pretrained(
+    flux_repo,
+    transformer=None,
+    torch_dtype=torch.bfloat16,
+).to("cuda")
+components = flux_pipe.components
+components.pop("transformer", None)
+delete_model(flux_repo)

 #######################
 # GUI
 #######################
-import gradio as gr
-import logging
 logging.getLogger("diffusers").setLevel(logging.ERROR)
-import diffusers
 diffusers.utils.logging.set_verbosity(40)
-import warnings
 warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
 warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
 warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
 ## BEGIN MOD
-from stablepy import logger
 #logger.setLevel(logging.CRITICAL)
 logger.setLevel(logging.DEBUG)
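
The new startup block in this hunk loads FLUX's small shared components once and reuses them for every FLUX checkpoint loaded later. A minimal sketch of the pattern, assuming stablepy's `env_components=` parameter and this repo's `delete_model` helper behave as the surrounding hunks suggest:

```python
import torch
from diffusers import FluxPipeline

# Load FLUX.1-dev with the ~12B-parameter transformer skipped; what remains are
# the lightweight shared parts: text encoders, tokenizers, VAE, scheduler.
flux_repo = "camenduru/FLUX.1-dev-diffusers"
pipe = FluxPipeline.from_pretrained(
    flux_repo,
    transformer=None,
    torch_dtype=torch.bfloat16,
)
shared = pipe.components          # dict of sub-modules; "transformer" maps to None
shared.pop("transformer", None)   # drop the placeholder entry entirely

# Each FLUX checkpoint loaded afterwards can then reuse `shared` instead of
# re-instantiating its own text encoders/VAE (stablepy-style call, assumed):
# model.load_pipe(model_name, ..., env_components=shared)
```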
 
@@ -173,12 +181,14 @@ class GuiSD:
         ] + [model_name]
         print(self.inventory)

-    def load_new_model(self, model_name, vae_model, task, progress=gr.Progress(track_tqdm=True)):
-
-        self.update_storage_models()
+    def load_new_model(self, model_name, vae_model, task, controlnet_model, progress=gr.Progress(track_tqdm=True)):

         # download link model > model_name

+        model_name = valid_model_name(model_name) # MOD
+
+        self.update_storage_models()
+
         vae_model = vae_model if vae_model != "None" else None
         model_type = get_model_type(model_name)
         dtype_model = torch.bfloat16 if model_type == "FLUX" else torch.float16
@@ -230,17 +240,19 @@ class GuiSD:
                 vae_model=vae_model,
                 type_model_precision=dtype_model,
                 retain_task_model_in_cache=False,
+                controlnet_model=controlnet_model,
                 device="cpu",
+                env_components=components,
             )
+            self.model.advanced_params(image_preprocessor_cuda_active=True)
         else:
-
             if self.model.base_model_id != model_name:
                 load_now_time = datetime.now()
                 elapsed_time = max((load_now_time - self.last_load).total_seconds(), 0)

-                if elapsed_time <= 8:
+                if elapsed_time <= 9:
                     print("Waiting for the previous model's time ops...")
-                    time.sleep(8-elapsed_time)
+                    time.sleep(9 - elapsed_time)

             self.model.device = torch.device("cpu")
             self.model.load_pipe(
@@ -249,6 +261,7 @@
                 vae_model=vae_model,
                 type_model_precision=dtype_model,
                 retain_task_model_in_cache=False,
+                controlnet_model=controlnet_model,
             )

         end_time = time.time()
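
The 8 to 9 second bump above belongs to a simple cooldown that spaces out successive pipeline loads. A minimal sketch of the pattern in isolation (the helper name is hypothetical):

```python
import time
from datetime import datetime

MIN_RELOAD_GAP = 9  # seconds, the new value in this commit

def wait_for_previous_load(last_load: datetime) -> None:
    # Sleep just long enough that at least MIN_RELOAD_GAP seconds separate two
    # consecutive model loads; elapsed is clamped at 0 as in load_new_model().
    elapsed = max((datetime.now() - last_load).total_seconds(), 0)
    if elapsed <= MIN_RELOAD_GAP:
        time.sleep(MIN_RELOAD_GAP - elapsed)
```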
@@ -285,6 +298,10 @@
         lora_scale4,
         lora5,
         lora_scale5,
+        lora6,
+        lora_scale6,
+        lora7,
+        lora_scale7,
         sampler,
         schedule_type,
         schedule_prediction_type,
@@ -305,6 +322,8 @@
         high_threshold,
         value_threshold,
         distance_threshold,
+        recolor_gamma_correction,
+        tile_blur_sigma,
         controlnet_output_scaling_in_unet,
         controlnet_start_threshold,
         controlnet_stop_threshold,
@@ -321,6 +340,9 @@
         hires_negative_prompt,
         hires_before_adetailer,
         hires_after_adetailer,
+        hires_schedule_type,
+        hires_guidance_scale,
+        controlnet_model,
         loop_generation,
         leave_progress_bar,
         disable_progress_bar,
@@ -362,6 +384,7 @@
         mask_blur_b,
         mask_padding_b,
         retain_task_cache_gui,
+        guidance_rescale,
         image_ip1,
         mask_ip1,
         model_ip1,
@@ -378,7 +401,7 @@
         yield info_state, gr.update(), gr.update()

         vae_model = vae_model if vae_model != "None" else None
-        loras_list = [lora1, lora2, lora3, lora4, lora5]
+        loras_list = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
         vae_msg = f"VAE: {vae_model}" if vae_model else ""
         msg_lora = ""

@@ -386,9 +409,9 @@
         loras_list = [s if s else "None" for s in loras_list]
         global lora_model_list
         lora_model_list = get_lora_model_list()
-        lora1, lora_scale1, lora2, lora_scale2, lora3, lora_scale3, lora4, lora_scale4, lora5, lora_scale5 = \
+        lora1, lora_scale1, lora2, lora_scale2, lora3, lora_scale3, lora4, lora_scale4, lora5, lora_scale5, lora6, lora_scale6, lora7, lora_scale7 = \
             set_prompt_loras(prompt, syntax_weights, model_name, lora1, lora_scale1, lora2, lora_scale2, lora3,
-                             lora_scale3, lora4, lora_scale4, lora5, lora_scale5)
+                             lora_scale3, lora4, lora_scale4, lora5, lora_scale5, lora6, lora_scale6, lora7, lora_scale7)
         ## END MOD

         print("Config model:", model_name, vae_model, loras_list)
@@ -490,6 +513,8 @@
             "high_threshold": high_threshold,
             "value_threshold": value_threshold,
             "distance_threshold": distance_threshold,
+            "recolor_gamma_correction": float(recolor_gamma_correction),
+            "tile_blur_sigma": int(tile_blur_sigma),
             "lora_A": lora1 if lora1 != "None" else None,
             "lora_scale_A": lora_scale1,
             "lora_B": lora2 if lora2 != "None" else None,
@@ -500,6 +525,10 @@
             "lora_scale_D": lora_scale4,
             "lora_E": lora5 if lora5 != "None" else None,
             "lora_scale_E": lora_scale5,
+            "lora_F": lora6 if lora6 != "None" else None,
+            "lora_scale_F": lora_scale6,
+            "lora_G": lora7 if lora7 != "None" else None,
+            "lora_scale_G": lora_scale7,
             ## BEGIN MOD
             "textual_inversion": get_embed_list(self.model.class_name) if textual_inversion else [],
             ## END MOD
@@ -543,6 +572,8 @@
             "hires_sampler": hires_sampler,
             "hires_before_adetailer": hires_before_adetailer,
             "hires_after_adetailer": hires_after_adetailer,
+            "hires_schedule_type": hires_schedule_type,
+            "hires_guidance_scale": hires_guidance_scale,
             "ip_adapter_image": params_ip_img,
             "ip_adapter_mask": params_ip_msk,
             "ip_adapter_model": params_ip_model,
@@ -550,8 +581,12 @@
             "ip_adapter_scale": params_ip_scale,
         }

+        # kwargs for diffusers pipeline
+        if guidance_rescale:
+            pipe_params["guidance_rescale"] = guidance_rescale
+
         self.model.device = torch.device("cuda:0")
-        if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * 5:
+        if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * self.model.num_loras:
             self.model.pipe.transformer.to(self.model.device)
             print("transformer to cuda")
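
The `guidance_rescale` addition above only forwards the kwarg when it is non-zero; `guidance_rescale` is a standard diffusers `__call__` argument where 0.0 means disabled, so the call dict stays unchanged by default. A minimal sketch of the conditional-kwarg pattern (the pipeline call is illustrative, not this repo's exact call):

```python
# Build the generation kwargs, then add guidance_rescale only when the GUI
# slider has been moved away from 0.
pipe_params = {"prompt": "1girl, solo", "num_inference_steps": 28, "guidance_scale": 7.0}

guidance_rescale = 0.7  # value coming from guidance_rescale_gui
if guidance_rescale:
    pipe_params["guidance_rescale"] = guidance_rescale

# images = pipeline(**pipe_params).images
```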
 
@@ -579,7 +614,7 @@
         if msg_lora:
             info_images += msg_lora

-        info_images = info_images + "<br>" + "GENERATION DATA:<br>" + escape_html(metadata[0]) + "<br>-------<br>"
+        info_images = info_images + "<br>" + "GENERATION DATA:<br>" + escape_html(metadata[-1]) + "<br>-------<br>"

         download_links = "<br>".join(
             [
@@ -614,37 +649,38 @@ def dummy_gpu():


 def sd_gen_generate_pipeline(*args):
-
     gpu_duration_arg = int(args[-1]) if args[-1] else 59
     verbose_arg = int(args[-2])
     load_lora_cpu = args[-3]
     generation_args = args[:-3]
     lora_list = [
         None if item == "None" or item == "" else item # MOD
-        for item in [args[7], args[9], args[11], args[13], args[15]]
+        for item in [args[7], args[9], args[11], args[13], args[15], args[17], args[19]]
     ]
-    lora_status = [None] * 5
+    lora_status = [None] * sd_gen.model.num_loras

     msg_load_lora = "Updating LoRAs in GPU..."
     if load_lora_cpu:
-        msg_load_lora = "Updating LoRAs in CPU (Slow but saves GPU usage)..."
+        msg_load_lora = "Updating LoRAs in CPU..."

-    if lora_list != sd_gen.model.lora_memory and lora_list != [None] * 5:
+    if lora_list != sd_gen.model.lora_memory and lora_list != [None] * sd_gen.model.num_loras:
         yield msg_load_lora, gr.update(), gr.update()

     # Load lora in CPU
     if load_lora_cpu:
-        lora_status = sd_gen.model.lora_merge(
+        lora_status = sd_gen.model.load_lora_on_the_fly(
             lora_A=lora_list[0], lora_scale_A=args[8],
             lora_B=lora_list[1], lora_scale_B=args[10],
             lora_C=lora_list[2], lora_scale_C=args[12],
             lora_D=lora_list[3], lora_scale_D=args[14],
             lora_E=lora_list[4], lora_scale_E=args[16],
+            lora_F=lora_list[5], lora_scale_F=args[18],
+            lora_G=lora_list[6], lora_scale_G=args[20],
         )
         print(lora_status)

-    sampler_name = args[17]
-    schedule_type_name = args[18]
+    sampler_name = args[21]
+    schedule_type_name = args[22]
     _, _, msg_sampler = check_scheduler_compatibility(
         sd_gen.model.class_name, sampler_name, schedule_type_name
     )
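
Note the positional bookkeeping in this hunk: two extra LoRA slots shift every later `*args` index, which is why the sampler moves from `args[17]` to `args[21]`. A hedged sketch of deriving those offsets from a single constant (not how the commit actually does it):

```python
NUM_LORAS = 7       # was 5 before this commit
FIRST_LORA_IDX = 7  # position of lora1 inside *args

# LoRA names sit at every other index: 7, 9, 11, 13, 15, 17, 19
LORA_NAME_INDICES = list(range(FIRST_LORA_IDX, FIRST_LORA_IDX + 2 * NUM_LORAS, 2))

SAMPLER_IDX = FIRST_LORA_IDX + 2 * NUM_LORAS  # 21, matching args[21] above
SCHEDULE_TYPE_IDX = SAMPLER_IDX + 1           # 22, matching args[22] above
```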
@@ -658,7 +694,7 @@ def sd_gen_generate_pipeline(*args):
         elif status is not None:
             gr.Warning(f"Failed to load LoRA: {lora}")

-    if lora_status == [None] * 5 and sd_gen.model.lora_memory != [None] * 5 and load_lora_cpu:
+    if lora_status == [None] * sd_gen.model.num_loras and sd_gen.model.lora_memory != [None] * sd_gen.model.num_loras and load_lora_cpu:
         lora_cache_msg = ", ".join(
             str(x) for x in sd_gen.model.lora_memory if x is not None
         )
@@ -715,6 +751,7 @@ def esrgan_upscale(image, upscaler_name, upscaler_size):
     return image_path


+# https://huggingface.co/spaces/BestWishYsh/ConsisID-preview-Space/discussions/1#674969a022b99c122af5d407
 dynamic_gpu_duration.zerogpu = True
 sd_gen_generate_pipeline.zerogpu = True
 sd_gen = GuiSD()
@@ -740,7 +777,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
     with gr.Column():
         with gr.Tab("Generation"):
             with gr.Row():
-                with gr.Column(scale=2):
+                with gr.Column(scale=1):

                     def update_task_options(model_name, task_name):
                         new_choices = MODEL_TYPE_TASK[get_model_type(model_name)]
@@ -781,7 +818,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
                     generate_from_image_btn_gui = gr.Button(value="GENERATE TAGS FROM IMAGE")
                     prompt_gui = gr.Textbox(lines=6, placeholder="1girl, solo, ...", label="Prompt", show_copy_button=True)
                     with gr.Accordion("Negative prompt, etc.", open=False) as menu_negative:
-                        neg_prompt_gui = gr.Textbox(lines=3, placeholder="lowres, (bad), ...", label="Negative prompt", show_copy_button=True)
+                        neg_prompt_gui = gr.Textbox(lines=3, placeholder="Enter Neg prompt", label="Negative prompt", value="lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, worst quality, low quality, very displeasing, (bad)", show_copy_button=True)
                         translate_prompt_button = gr.Button(value="Translate prompt to English", size="sm", variant="secondary")
                         with gr.Row():
                             insert_prompt_gui = gr.Radio(label="Insert reccomended positive / negative prompt", choices=["None", "Auto", "Animagine", "Pony"], value="Auto", interactive=True)
@@ -819,11 +856,14 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
                         label="Generated images",
                         show_label=False,
                         elem_id="gallery",
-                        columns=[2],
-                        rows=[2],
+                        #columns=[2],
+                        columns=[1],
+                        #rows=[2],
+                        rows=[1],
                         object_fit="contain",
                         # height="auto",
                         interactive=False,
+                        #preview=False,
                         preview=False,
                         show_share_button=False,
                         show_download_button=True,
@@ -846,7 +886,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
                     gpu_duration_gui = gr.Number(minimum=5, maximum=240, value=59, show_label=False, container=False, info="GPU time duration (seconds)")
                     with gr.Column():
                         verbose_info_gui = gr.Checkbox(value=False, container=False, label="Status info")
-                        load_lora_cpu_gui = gr.Checkbox(value=False, container=False, label="Load LoRAs on CPU (Save GPU time)")
+                        load_lora_cpu_gui = gr.Checkbox(value=False, container=False, label="Load LoRAs on CPU")

                 with gr.Column(scale=1):
                     with gr.Accordion("Generation settings", open=False, visible=True) as menu_gen:
@@ -855,6 +895,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
                         img_height_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=1024, label="Img Height")
                         steps_gui = gr.Slider(minimum=1, maximum=100, step=1, value=28, label="Steps")
                         cfg_gui = gr.Slider(minimum=0, maximum=30, step=0.5, value=7.0, label="CFG")
+                        guidance_rescale_gui = gr.Slider(label="CFG rescale:", value=0., step=0.01, minimum=0., maximum=1.5)
                         with gr.Row():
                             seed_gui = gr.Number(minimum=-1, maximum=2**32-1, value=-1, label="Seed")
                             pag_scale_gui = gr.Slider(minimum=0.0, maximum=10.0, step=0.1, value=0.0, label="PAG Scale")
@@ -864,118 +905,119 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
                         with gr.Row():
                             sampler_gui = gr.Dropdown(label="Sampler", choices=scheduler_names, value="Euler")
                             schedule_type_gui = gr.Dropdown(label="Schedule type", choices=SCHEDULE_TYPE_OPTIONS, value=SCHEDULE_TYPE_OPTIONS[0])
+                            schedule_prediction_type_gui = gr.Dropdown(label="Discrete Sampling Type", choices=SCHEDULE_PREDICTION_TYPE_OPTIONS, value=SCHEDULE_PREDICTION_TYPE_OPTIONS[0])
                             vae_model_gui = gr.Dropdown(label="VAE Model", choices=vae_model_list, value=vae_model_list[0])
                             prompt_syntax_gui = gr.Dropdown(label="Prompt Syntax", choices=PROMPT_W_OPTIONS, value=PROMPT_W_OPTIONS[1][1])

-                        with gr.Row(equal_height=False):
-
-                            def run_set_params_gui(base_prompt, name_model):
-                                valid_receptors = { # default values
-                                    "prompt": gr.update(value=base_prompt),
-                                    "neg_prompt": gr.update(value=""),
-                                    "Steps": gr.update(value=30),
-                                    "width": gr.update(value=1024),
-                                    "height": gr.update(value=1024),
-                                    "Seed": gr.update(value=-1),
-                                    "Sampler": gr.update(value="Euler"),
-                                    "CFG scale": gr.update(value=7.), # cfg
-                                    "Clip skip": gr.update(value=True),
-                                    "Model": gr.update(value=name_model),
-                                    "Schedule type": gr.update(value="Automatic"),
-                                    "PAG": gr.update(value=.0),
-                                    "FreeU": gr.update(value=False),
-                                }
-                                valid_keys = list(valid_receptors.keys())
-
-                                parameters = extract_parameters(base_prompt)
-                                # print(parameters)
-
-                                if "Sampler" in parameters:
-                                    value_sampler = parameters["Sampler"]
-                                    for s_type in SCHEDULE_TYPE_OPTIONS:
-                                        if s_type in value_sampler:
-                                            value_sampler = value_sampler.replace(s_type, "").strip()
-                                            parameters["Sampler"] = value_sampler
-                                            parameters["Schedule type"] = s_type
-
-                                for key, val in parameters.items():
-                                    # print(val)
-                                    if key in valid_keys:
-                                        try:
-                                            if key == "Sampler":
-                                                if val not in scheduler_names:
-                                                    continue
-                                            if key == "Schedule type":
-                                                if val not in SCHEDULE_TYPE_OPTIONS:
-                                                    val = "Automatic"
-                                            elif key == "Clip skip":
-                                                if "," in str(val):
-                                                    val = val.replace(",", "")
-                                                if int(val) >= 2:
+                        with gr.Row(equal_height=False):
+
+                            def run_set_params_gui(base_prompt, name_model):
+                                valid_receptors = { # default values
+                                    "prompt": gr.update(value=base_prompt),
+                                    "neg_prompt": gr.update(value=""),
+                                    "Steps": gr.update(value=30),
+                                    "width": gr.update(value=1024),
+                                    "height": gr.update(value=1024),
+                                    "Seed": gr.update(value=-1),
+                                    "Sampler": gr.update(value="Euler"),
+                                    "CFG scale": gr.update(value=7.), # cfg
+                                    "Clip skip": gr.update(value=True),
+                                    "Model": gr.update(value=name_model),
+                                    "Schedule type": gr.update(value="Automatic"),
+                                    "PAG": gr.update(value=.0),
+                                    "FreeU": gr.update(value=False),
+                                }
+                                valid_keys = list(valid_receptors.keys())
+
+                                parameters = extract_parameters(base_prompt)
+                                # print(parameters)
+
+                                if "Sampler" in parameters:
+                                    value_sampler = parameters["Sampler"]
+                                    for s_type in SCHEDULE_TYPE_OPTIONS:
+                                        if s_type in value_sampler:
+                                            value_sampler = value_sampler.replace(s_type, "").strip()
+                                            parameters["Sampler"] = value_sampler
+                                            parameters["Schedule type"] = s_type
+
+                                for key, val in parameters.items():
+                                    # print(val)
+                                    if key in valid_keys:
+                                        try:
+                                            if key == "Sampler":
+                                                if val not in scheduler_names:
+                                                    continue
+                                            if key == "Schedule type":
+                                                if val not in SCHEDULE_TYPE_OPTIONS:
+                                                    val = "Automatic"
+                                            elif key == "Clip skip":
+                                                if "," in str(val):
+                                                    val = val.replace(",", "")
+                                                if int(val) >= 2:
                                                     val = True
-                                            if key == "prompt":
-                                                if ">" in val and "<" in val:
-                                                    val = re.sub(r'<[^>]+>', '', val)
-                                                    print("Removed LoRA written in the prompt")
-                                            if key in ["prompt", "neg_prompt"]:
-                                                val = re.sub(r'\s+', ' ', re.sub(r',+', ',', val)).strip()
-                                            if key in ["Steps", "width", "height", "Seed"]:
-                                                val = int(val)
-                                            if key == "FreeU":
-                                                val = True
-                                            if key in ["CFG scale", "PAG"]:
-                                                val = float(val)
-                                            if key == "Model":
-                                                filtered_models = [m for m in model_list if val in m]
-                                                if filtered_models:
-                                                    val = filtered_models[0]
-                                                else:
-                                                    val = name_model
-                                            if key == "Seed":
-                                                continue
-                                            valid_receptors[key] = gr.update(value=val)
-                                            # print(val, type(val))
-                                # print(valid_receptors)
-                                        except Exception as e:
-                                            print(str(e))
-                                return [value for value in valid_receptors.values()]
-
-                            set_params_gui.click(
-                                run_set_params_gui, [prompt_gui, model_name_gui], [
-                                    prompt_gui,
-                                    neg_prompt_gui,
-                                    steps_gui,
-                                    img_width_gui,
-                                    img_height_gui,
-                                    seed_gui,
-                                    sampler_gui,
-                                    cfg_gui,
-                                    clip_skip_gui,
-                                    model_name_gui,
-                                    schedule_type_gui,
-                                    pag_scale_gui,
-                                    free_u_gui,
-                                ],
-                            )
-
-                            def run_clear_prompt_gui():
-                                return gr.update(value=""), gr.update(value="")
-                            clear_prompt_gui.click(
-                                run_clear_prompt_gui, [], [prompt_gui, neg_prompt_gui]
-                            )
-
-                            def run_set_random_seed():
-                                return -1
-                            set_random_seed.click(
-                                run_set_random_seed, [], seed_gui
-                            )
+                                            if key == "prompt":
+                                                if ">" in val and "<" in val:
+                                                    val = re.sub(r'<[^>]+>', '', val)
+                                                    print("Removed LoRA written in the prompt")
+                                            if key in ["prompt", "neg_prompt"]:
+                                                val = re.sub(r'\s+', ' ', re.sub(r',+', ',', val)).strip()
+                                            if key in ["Steps", "width", "height", "Seed"]:
+                                                val = int(val)
+                                            if key == "FreeU":
+                                                val = True
+                                            if key in ["CFG scale", "PAG"]:
+                                                val = float(val)
+                                            if key == "Model":
+                                                filtered_models = [m for m in model_list if val in m]
+                                                if filtered_models:
+                                                    val = filtered_models[0]
+                                                else:
+                                                    val = name_model
+                                            if key == "Seed":
+                                                continue
+                                            valid_receptors[key] = gr.update(value=val)
+                                            # print(val, type(val))
+                                # print(valid_receptors)
+                                        except Exception as e:
+                                            print(str(e))
+                                return [value for value in valid_receptors.values()]
+
+                            set_params_gui.click(
+                                run_set_params_gui, [prompt_gui, model_name_gui], [
+                                    prompt_gui,
+                                    neg_prompt_gui,
+                                    steps_gui,
+                                    img_width_gui,
+                                    img_height_gui,
+                                    seed_gui,
+                                    sampler_gui,
+                                    cfg_gui,
+                                    clip_skip_gui,
+                                    model_name_gui,
+                                    schedule_type_gui,
+                                    pag_scale_gui,
+                                    free_u_gui,
+                                ],
+                            )
+
+                            def run_clear_prompt_gui():
+                                return gr.update(value=""), gr.update(value="")
+                            clear_prompt_gui.click(
+                                run_clear_prompt_gui, [], [prompt_gui, neg_prompt_gui]
+                            )
+
+                            def run_set_random_seed():
+                                return -1
+                            set_random_seed.click(
+                                run_set_random_seed, [], seed_gui
+                            )

                     with gr.Accordion("LoRA", open=False, visible=True) as menu_lora:
-                        def lora_dropdown(label):
-                            return gr.Dropdown(label=label, choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
+                        def lora_dropdown(label, visible=True):
+                            return gr.Dropdown(label=label, choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320, visible=visible)

-                        def lora_scale_slider(label):
-                            return gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label=label)
+                        def lora_scale_slider(label, visible=True):
+                            return gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label=label, visible=visible)

                         def lora_textbox(label):
                             return gr.Textbox(label=label, info="Example of prompt:", value="None", show_copy_button=True, interactive=False, visible=False)
@@ -1021,6 +1063,22 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
                                 lora5_info_gui = lora_textbox("LoRA5 prompts")
                                 lora5_copy_gui = gr.Button(value="Copy example to prompt", visible=False)
                                 lora5_desc_gui = gr.Markdown(value="", visible=False)
+                        with gr.Column():
+                            lora6_gui = lora_dropdown("LoRA6", visible=False)
+                            lora_scale_6_gui = lora_scale_slider("LoRA Scale 6", visible=False)
+                            with gr.Row():
+                                with gr.Group():
+                                    lora6_info_gui = lora_textbox("LoRA6 prompts")
+                                    lora6_copy_gui = gr.Button(value="Copy example to prompt", visible=False)
+                                    lora6_desc_gui = gr.Markdown(value="", visible=False)
+                        with gr.Column():
+                            lora7_gui = lora_dropdown("LoRA7", visible=False)
+                            lora_scale_7_gui = lora_scale_slider("LoRA Scale 7", visible=False)
+                            with gr.Row():
+                                with gr.Group():
+                                    lora7_info_gui = lora_textbox("LoRA7 prompts")
+                                    lora7_copy_gui = gr.Button(value="Copy example to prompt", visible=False)
+                                    lora7_desc_gui = gr.Markdown(value="", visible=False)
                         with gr.Accordion("From URL", open=True, visible=True):
                             with gr.Row():
                                 search_civitai_basemodel_lora = gr.CheckboxGroup(label="Search LoRA for", choices=CIVITAI_BASEMODEL, value=["Pony", "Illustrious", "SDXL 1.0"])
@@ -1037,7 +1095,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
                             search_civitai_result_lora = gr.Dropdown(label="Search Results", choices=[("", "")], value="", allow_custom_value=True, visible=False)
                             with gr.Row():
                                 text_lora = gr.Textbox(label="LoRA's download URL", placeholder="https://civitai.com/api/download/models/28907", info="It has to be .safetensors files, and you can also download them from Hugging Face.", lines=1, scale=4)
-                                romanize_text = gr.Checkbox(value=False, label="Transliterate name", scale=1)
+                                romanize_text = gr.Checkbox(value=False, label="Transliterate name", scale=1, visible=False)
                             button_lora = gr.Button("Get and Refresh the LoRA Lists")
                             new_lora_status = gr.HTML()
                         with gr.Accordion("From Local", open=True, visible=True):
@@ -1055,6 +1113,9 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
                         hires_steps_gui = gr.Slider(minimum=0, value=30, maximum=100, step=1, label="Hires Steps")
                         hires_denoising_strength_gui = gr.Slider(minimum=0.1, maximum=1.0, step=0.01, value=0.55, label="Hires Denoising Strength")
                         hires_sampler_gui = gr.Dropdown(label="Hires Sampler", choices=POST_PROCESSING_SAMPLER, value=POST_PROCESSING_SAMPLER[0])
+                        hires_schedule_list = ["Use same schedule type"] + SCHEDULE_TYPE_OPTIONS
+                        hires_schedule_type_gui = gr.Dropdown(label="Hires Schedule type", choices=hires_schedule_list, value=hires_schedule_list[0])
+                        hires_guidance_scale_gui = gr.Slider(minimum=-1., maximum=30., step=0.5, value=-1., label="Hires CFG", info="If the value is -1, the main CFG will be used")
                         hires_prompt_gui = gr.Textbox(label="Hires Prompt", placeholder="Main prompt will be use", lines=3)
                         hires_negative_prompt_gui = gr.Textbox(label="Hires Negative Prompt", placeholder="Main negative prompt will be use", lines=3)

@@ -1121,14 +1182,23 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
                             minimum=64, maximum=2048, step=64, value=1024, label="Image Resolution",
                             info="The maximum proportional size of the generated image based on the uploaded image."
                         )
-                        preprocessor_name_gui = gr.Dropdown(label="Preprocessor Name", choices=PREPROCESSOR_CONTROLNET["canny"])
+                        with gr.Row():
+                            controlnet_model_gui = gr.Dropdown(label="ControlNet model", choices=DIFFUSERS_CONTROLNET_MODEL, value=DIFFUSERS_CONTROLNET_MODEL[0])
+                            control_net_output_scaling_gui = gr.Slider(minimum=0, maximum=5.0, step=0.1, value=1, label="ControlNet Output Scaling in UNet")
+                            control_net_start_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=0, label="ControlNet Start Threshold (%)")
+                            control_net_stop_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="ControlNet Stop Threshold (%)")
+                        with gr.Row():
+                            preprocessor_name_gui = gr.Dropdown(label="Preprocessor Name", choices=TASK_AND_PREPROCESSORS["canny"])
+                            preprocess_resolution_gui = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocessor Resolution")
+                            low_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="'CANNY' low threshold")
+                            high_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="'CANNY' high threshold")

                         def change_preprocessor_choices(task):
                             task = TASK_STABLEPY[task]
-                            if task in PREPROCESSOR_CONTROLNET.keys():
-                                choices_task = PREPROCESSOR_CONTROLNET[task]
+                            if task in TASK_AND_PREPROCESSORS.keys():
+                                choices_task = TASK_AND_PREPROCESSORS[task]
                             else:
-                                choices_task = PREPROCESSOR_CONTROLNET["canny"]
+                                choices_task = TASK_AND_PREPROCESSORS["canny"]
                             return gr.update(choices=choices_task, value=choices_task[0])

                         task_gui.change(
@@ -1136,16 +1206,12 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
                             [task_gui],
                             [preprocessor_name_gui],
                         )
+
                         with gr.Row():
-                            preprocess_resolution_gui = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocess Resolution")
-                            low_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="Canny low threshold")
-                            high_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="Canny high threshold")
-                            value_threshold_gui = gr.Slider(minimum=1, maximum=2.0, step=0.01, value=0.1, label="Hough value threshold (MLSD)")
-                        with gr.Row():
-                            distance_threshold_gui = gr.Slider(minimum=1, maximum=20.0, step=0.01, value=0.1, label="Hough distance threshold (MLSD)")
-                            control_net_output_scaling_gui = gr.Slider(minimum=0, maximum=5.0, step=0.1, value=1, label="ControlNet Output Scaling in UNet")
-                            control_net_start_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=0, label="ControlNet Start Threshold (%)")
-                            control_net_stop_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="ControlNet Stop Threshold (%)")
+                            value_threshold_gui = gr.Slider(minimum=1, maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
+                            distance_threshold_gui = gr.Slider(minimum=1, maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
+                            recolor_gamma_correction_gui = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
+                            tile_blur_sigma_gui = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'TILE' blur sigma")

                     with gr.Accordion("IP-Adapter", open=False, visible=True) as menu_ipa:

@@ -1204,7 +1270,6 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
                         style_button.click(load_json_style_file, [style_json_gui], [style_prompt_gui])

                     with gr.Accordion("Other settings", open=False, visible=True) as menu_other:
-                        schedule_prediction_type_gui = gr.Dropdown(label="Discrete Sampling Type", choices=SCHEDULE_PREDICTION_TYPE_OPTIONS, value=SCHEDULE_PREDICTION_TYPE_OPTIONS[0])
                         with gr.Row():
                             save_generated_images_gui = gr.Checkbox(value=False, label="Save Generated Images")
                             filename_pattern_gui = gr.Textbox(label="Filename pattern", value="model,seed", placeholder="model,seed,sampler,schedule_type,img_width,img_height,guidance_scale,num_steps,vae,prompt_section,neg_prompt_section", lines=1)
@@ -1289,15 +1354,15 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
                         # enable crop (or disable it)
                         # transforms=["crop"],
                         brush=gr.Brush(
-                            default_size="16", # or leave it as 'auto'
-                            color_mode="fixed", # 'fixed' hides the user swatches and colorpicker, 'defaults' shows it
-                            # default_color="black", # html names are supported
-                            colors=[
-                                "rgba(0, 0, 0, 1)", # rgb(a)
-                                "rgba(0, 0, 0, 0.1)",
-                                "rgba(255, 255, 255, 0.1)",
-                                # "hsl(360, 120, 120)" # in fact any valid colorstring
-                            ]
+                            default_size="16", # or leave it as 'auto'
+                            color_mode="fixed", # 'fixed' hides the user swatches and colorpicker, 'defaults' shows it
+                            # default_color="black", # html names are supported
+                            colors=[
+                                "rgba(0, 0, 0, 1)", # rgb(a)
+                                "rgba(0, 0, 0, 0.1)",
+                                "rgba(255, 255, 255, 0.1)",
+                                # "hsl(360, 120, 120)" # in fact any valid colorstring
+                            ]
                         ),
                         eraser=gr.Eraser(default_size="16")
                     )
@@ -1345,6 +1410,9 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
                 outputs=[result_up_tab],
             )

+        with gr.Tab("Preprocessor", render=True):
+            preprocessor_tab()
+
     ## BEGIN MOD
     interface_mode_gui.change(
         change_interface_mode,
@@ -1379,15 +1447,19 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
     gr.on(
         triggers=[lora1_gui.change, lora_scale_1_gui.change, lora2_gui.change, lora_scale_2_gui.change,
                   lora3_gui.change, lora_scale_3_gui.change, lora4_gui.change, lora_scale_4_gui.change,
-                  lora5_gui.change, lora_scale_5_gui.change, prompt_syntax_gui.change],
+                  lora5_gui.change, lora_scale_5_gui.change, lora6_gui.change, lora_scale_6_gui.change,
+                  lora7_gui.change, lora_scale_7_gui.change, prompt_syntax_gui.change],
         fn=update_loras,
         inputs=[prompt_gui, prompt_syntax_gui, lora1_gui, lora_scale_1_gui, lora2_gui, lora_scale_2_gui,
-                lora3_gui, lora_scale_3_gui, lora4_gui, lora_scale_4_gui, lora5_gui, lora_scale_5_gui],
+                lora3_gui, lora_scale_3_gui, lora4_gui, lora_scale_4_gui, lora5_gui, lora_scale_5_gui,
+                lora6_gui, lora_scale_6_gui, lora7_gui, lora_scale_7_gui],
         outputs=[prompt_gui, lora1_gui, lora_scale_1_gui, lora1_info_gui, lora1_copy_gui, lora1_desc_gui,
                  lora2_gui, lora_scale_2_gui, lora2_info_gui, lora2_copy_gui, lora2_desc_gui,
                  lora3_gui, lora_scale_3_gui, lora3_info_gui, lora3_copy_gui, lora3_desc_gui,
                  lora4_gui, lora_scale_4_gui, lora4_info_gui, lora4_copy_gui, lora4_desc_gui,
-                 lora5_gui, lora_scale_5_gui, lora5_info_gui, lora5_copy_gui, lora5_desc_gui],
+                 lora5_gui, lora_scale_5_gui, lora5_info_gui, lora5_copy_gui, lora5_desc_gui,
+                 lora6_gui, lora_scale_6_gui, lora6_info_gui, lora6_copy_gui, lora6_desc_gui,
+                 lora7_gui, lora_scale_7_gui, lora7_info_gui, lora7_copy_gui, lora7_desc_gui],
         queue=False,
        trigger_mode="once",
     )
@@ -1396,6 +1468,8 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
     lora3_copy_gui.click(apply_lora_prompt, [prompt_gui, lora3_info_gui], [prompt_gui], queue=False)
     lora4_copy_gui.click(apply_lora_prompt, [prompt_gui, lora4_info_gui], [prompt_gui], queue=False)
     lora5_copy_gui.click(apply_lora_prompt, [prompt_gui, lora5_info_gui], [prompt_gui], queue=False)
+    lora6_copy_gui.click(apply_lora_prompt, [prompt_gui, lora6_info_gui], [prompt_gui], queue=False)
+    lora7_copy_gui.click(apply_lora_prompt, [prompt_gui, lora7_info_gui], [prompt_gui], queue=False)
     gr.on(
         triggers=[search_civitai_button_lora.click, search_civitai_query_lora.submit],
         fn=search_civitai_lora,
@@ -1407,9 +1481,9 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
     )
     search_civitai_result_lora.change(select_civitai_lora, [search_civitai_result_lora], [text_lora, search_civitai_desc_lora], queue=False, scroll_to_output=True)
     search_civitai_gallery_lora.select(update_civitai_selection, None, [search_civitai_result_lora], queue=False, show_api=False)
-    button_lora.click(get_my_lora, [text_lora, romanize_text], [lora1_gui, lora2_gui, lora3_gui, lora4_gui, lora5_gui, new_lora_status], scroll_to_output=True)
+    button_lora.click(get_my_lora, [text_lora, romanize_text], [lora1_gui, lora2_gui, lora3_gui, lora4_gui, lora5_gui, lora6_gui, lora7_gui, new_lora_status], scroll_to_output=True)
     upload_button_lora.upload(upload_file_lora, [upload_button_lora], [file_output_lora, upload_button_lora]).success(
-        move_file_lora, [file_output_lora], [lora1_gui, lora2_gui, lora3_gui, lora4_gui, lora5_gui], scroll_to_output=True)
+        move_file_lora, [file_output_lora], [lora1_gui, lora2_gui, lora3_gui, lora4_gui, lora5_gui, lora6_gui, lora7_gui], scroll_to_output=True)

     use_textual_inversion_gui.change(set_textual_inversion_prompt, [use_textual_inversion_gui, prompt_gui, neg_prompt_gui, prompt_syntax_gui], [prompt_gui, neg_prompt_gui])

@@ -1454,7 +1528,8 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
         inputs=[
             model_name_gui,
             vae_model_gui,
-            task_gui
+            task_gui,
+            controlnet_model_gui,
         ],
         outputs=[load_model_gui],
         queue=True,
@@ -1479,6 +1554,10 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
             lora_scale_4_gui,
             lora5_gui,
             lora_scale_5_gui,
+            lora6_gui,
+            lora_scale_6_gui,
+            lora7_gui,
+            lora_scale_7_gui,
             sampler_gui,
             schedule_type_gui,
             schedule_prediction_type_gui,
@@ -1499,6 +1578,8 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
             high_threshold_gui,
             value_threshold_gui,
             distance_threshold_gui,
+            recolor_gamma_correction_gui,
+            tile_blur_sigma_gui,
             control_net_output_scaling_gui,
             control_net_start_threshold_gui,
             control_net_stop_threshold_gui,
@@ -1515,6 +1596,9 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
             hires_negative_prompt_gui,
             hires_before_adetailer_gui,
             hires_after_adetailer_gui,
+            hires_schedule_type_gui,
+            hires_guidance_scale_gui,
+            controlnet_model_gui,
             loop_generation_gui,
             leave_progress_bar_gui,
             disable_progress_bar_gui,
@@ -1556,6 +1640,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
             mask_blur_b_gui,
             mask_padding_b_gui,
             retain_task_cache_gui,
+            guidance_rescale_gui,
             image_ip1,
             mask_ip1,
             model_ip1,
 
constants.py CHANGED
@@ -17,7 +17,7 @@ DOWNLOAD_LORA = "https://huggingface.co/Leopain/color/resolve/main/Coloring_book

 LOAD_DIFFUSERS_FORMAT_MODEL = [
     'stabilityai/stable-diffusion-xl-base-1.0',
-    'Laxhar/noobai-XL-1.0',
+    'Laxhar/noobai-XL-1.1',
     'black-forest-labs/FLUX.1-dev',
     'John6666/blue-pencil-flux1-v021-fp8-flux',
     'John6666/wai-ani-flux-v10forfp8-fp8-flux',
@@ -31,6 +31,7 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'terminusresearch/FluxBooru-v0.3',
     'ostris/OpenFLUX.1',
     'shuttleai/shuttle-3-diffusion',
+    'Laxhar/noobai-XL-1.0',
     'John6666/noobai-xl-nai-xl-epsilonpred10version-sdxl',
     'Laxhar/noobai-XL-0.77',
     'John6666/noobai-xl-nai-xl-epsilonpred075version-sdxl',
@@ -40,9 +41,13 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'John6666/noobaiiter-xl-vpred-v075-sdxl',
     'John6666/ntr-mix-illustrious-xl-noob-xl-v40-sdxl',
     'John6666/ntr-mix-illustrious-xl-noob-xl-ntrmix35-sdxl',
+    'John6666/ntr-mix-illustrious-xl-noob-xl-v777-sdxl',
+    'John6666/ntr-mix-illustrious-xl-noob-xl-v777forlora-sdxl',
     'John6666/haruki-mix-illustrious-v10-sdxl',
     'John6666/noobreal-v10-sdxl',
     'John6666/complicated-noobai-merge-vprediction-sdxl',
+    'Laxhar/noobai-XL-Vpred-0.65s',
+    'Laxhar/noobai-XL-Vpred-0.65',
     'Laxhar/noobai-XL-Vpred-0.6',
     'John6666/noobai-xl-nai-xl-vpred05version-sdxl',
     'John6666/noobai-fusion2-vpred-itercomp-v1-sdxl',
@@ -56,6 +61,7 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'John6666/wai-nsfw-illustrious-v70-sdxl',
     'John6666/illustrious-pony-mix-v3-sdxl',
     'John6666/nova-anime-xl-illustriousv10-sdxl',
+    'John6666/nova-orange-xl-v30-sdxl',
     'John6666/silvermoon-mix03-illustrious-v10-sdxl',
     'eienmojiki/Anything-XL',
     'eienmojiki/Starry-XL-v5.2',
@@ -82,9 +88,8 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'John6666/prefect-pony-xl-v4-sdxl',
     'John6666/mala-anime-mix-nsfw-pony-xl-v5-sdxl',
     'John6666/wai-ani-nsfw-ponyxl-v10-sdxl',
-    'John6666/wai-ani-nsfw-ponyxl-v9-sdxl',
     'John6666/wai-real-mix-v11-sdxl',
-    'John6666/babes-by-stable-yogi-ponyv3-sdxl',
+    'John6666/wai-shuffle-pdxl-v2-sdxl',
     'John6666/wai-c-v6-sdxl',
     'John6666/iniverse-mix-xl-sfwnsfw-pony-guofeng-v43-sdxl',
     'John6666/sifw-annihilation-xl-v2-sdxl',
@@ -114,7 +119,7 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'digiplay/DarkSushi2.5D_v1',
     'digiplay/darkphoenix3D_v1.1',
     'digiplay/BeenYouLiteL11_diffusers',
-    'Yntec/RevAnimatedV2Rebirth',
+    'GraydientPlatformAPI/rev-animated2',
     'youknownothing/cyberrealistic_v50',
     'youknownothing/deliberate-v6',
     'GraydientPlatformAPI/deliberate-cyber3',
@@ -142,7 +147,7 @@ DOWNLOAD_EMBEDS = [
     'https://huggingface.co/datasets/Nerfgun3/bad_prompt/blob/main/bad_prompt_version2.pt',
     # 'https://huggingface.co/embed/negative/resolve/main/EasyNegativeV2.safetensors',
     # 'https://huggingface.co/embed/negative/resolve/main/bad-hands-5.pt',
-    ]
+]

 CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
 HF_TOKEN = os.environ.get("HF_READ_TOKEN")
@@ -155,79 +160,6 @@ DIRECTORY_EMBEDS = 'embedings'
 CACHE_HF = "/home/user/.cache/huggingface/hub/"
 STORAGE_ROOT = "/home/user/"

-PREPROCESSOR_CONTROLNET = {
-    "openpose": [
-        "Openpose",
-        "None",
-    ],
-    "scribble": [
-        "HED",
-        "PidiNet",
-        "None",
-    ],
-    "softedge": [
-        "PidiNet",
-        "HED",
-        "HED safe",
-        "PidiNet safe",
-        "None",
-    ],
-    "segmentation": [
-        "UPerNet",
-        "None",
-    ],
-    "depth": [
-        "DPT",
-        "Midas",
-        "None",
-    ],
-    "normalbae": [
-        "NormalBae",
-        "None",
-    ],
-    "lineart": [
-        "Lineart",
-        "Lineart coarse",
-        "Lineart (anime)",
-        "None",
-        "None (anime)",
-    ],
-    "lineart_anime": [
-        "Lineart",
-        "Lineart coarse",
-        "Lineart (anime)",
-        "None",
-        "None (anime)",
-    ],
-    "shuffle": [
-        "ContentShuffle",
-        "None",
-    ],
-    "canny": [
-        "Canny",
-        "None",
-    ],
-    "mlsd": [
-        "MLSD",
-        "None",
-    ],
-    "ip2p": [
-        "ip2p"
-    ],
-    "recolor": [
-        "Recolor luminance",
-        "Recolor intensity",
-        "None",
-    ],
-    "tile": [
-        "Mild Blur",
-        "Moderate Blur",
-        "Heavy Blur",
-        "None",
-    ],
-
-}
-
 TASK_STABLEPY = {
     'txt2img': 'txt2img',
     'img2img': 'img2img',
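
This removal pairs with the app.py change above: the preprocessor table now ships with stablepy as `TASK_AND_PREPROCESSORS` (see the new import there). A minimal sketch of the replacement lookup, assuming the stablepy table mirrors the removed dict:

```python
from stablepy import TASK_AND_PREPROCESSORS  # import added to app.py in this commit

def preprocessor_choices(task: str) -> list:
    # Fall back to the "canny" entry for unknown tasks, equivalent to what
    # change_preprocessor_choices() in app.py now does with an if/else.
    return TASK_AND_PREPROCESSORS.get(task, TASK_AND_PREPROCESSORS["canny"])
```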
@@ -284,11 +216,74 @@ UPSCALER_DICT_GUI = {

 UPSCALER_KEYS = list(UPSCALER_DICT_GUI.keys())

[≈62 added lines not captured in this extract]
 PROMPT_W_OPTIONS = [
     ("Compel format: (word)weight", "Compel"),
     ("Classic format: (word:weight)", "Classic"),
     ("Classic-original format: (word:weight)", "Classic-original"),
     ("Classic-no_norm format: (word:weight)", "Classic-no_norm"),
[1 added line not captured in this extract]
     ("Classic-ignore", "Classic-ignore"),
     ("None", "None"),
 ]
@@ -371,7 +366,7 @@ EXAMPLES_GUI = [
371
  1.0, # cn scale
372
  0.0, # cn start
373
  1.0, # cn end
374
- "Classic",
375
  "Nearest",
376
  45,
377
  False,
@@ -384,7 +379,7 @@ EXAMPLES_GUI = [
384
  -1,
385
  "None",
386
  0.33,
387
- "FlowMatchEuler",
388
  1152,
389
  896,
390
  "black-forest-labs/FLUX.1-dev",
@@ -408,7 +403,7 @@ EXAMPLES_GUI = [
408
  -1,
409
  "None",
410
  0.33,
411
- "DPM++ 2M SDE Lu",
412
  1024,
413
  1024,
414
  "John6666/epicrealism-xl-v10kiss2-sdxl",
@@ -491,7 +486,7 @@ EXAMPLES_GUI = [
491
  1.0, # cn scale
492
  0.0, # cn start
493
  0.9, # cn end
494
- "Compel",
495
  "Latent (antialiased)",
496
  46,
497
  False,
 
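Note: model selection for ControlNet now goes through the new DIFFUSERS_CONTROLNET_MODEL list of plain Diffusers repo ids, with "Automatic" as the default entry (the old per-task preprocessor table left constants.py; see the new image_processor.py). A minimal sketch of how "Automatic" could be resolved to a concrete repo id; the helper select_controlnet_model and its per-task mapping are illustrative assumptions, not part of this commit:

    # Hypothetical resolver (not in this commit): pick a ControlNet repo id
    # when the user keeps the "Automatic" entry.
    SD15_BY_TASK = {
        "openpose": "lllyasviel/control_v11p_sd15_openpose",
        "canny": "lllyasviel/control_v11p_sd15_canny",
        "tile": "lllyasviel/control_v11f1e_sd15_tile",
    }

    def select_controlnet_model(task: str, is_sdxl: bool, choice: str = "Automatic") -> str:
        if choice != "Automatic":
            return choice  # the user picked an explicit repo id from the list
        if is_sdxl:
            # Assumed fallback: a union model covers most SDXL tasks.
            return "r3gm/controlnet-union-sdxl-1.0-fp16"
        return SD15_BY_TASK.get(task, "lllyasviel/control_v11p_sd15_canny")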
image_processor.py ADDED
@@ -0,0 +1,130 @@
+import spaces
+import gradio as gr
+from stablepy import Preprocessor
+
+PREPROCESSOR_TASKS_LIST = [
+    "Canny",
+    "Openpose",
+    "DPT",
+    "Midas",
+    "ZoeDepth",
+    "DepthAnything",
+    "HED",
+    "PidiNet",
+    "TEED",
+    "Lineart",
+    "LineartAnime",
+    "Anyline",
+    "Lineart standard",
+    "SegFormer",
+    "UPerNet",
+    "ContentShuffle",
+    "Recolor",
+    "Blur",
+    "MLSD",
+    "NormalBae",
+]
+
+preprocessor = Preprocessor()
+
+
+def process_inputs(
+    image,
+    name,
+    resolution,
+    processor_resolution,
+    low_threshold,
+    high_threshold,
+    value_threshold,
+    distance_threshold,
+    recolor_mode,
+    recolor_gamma_correction,
+    blur_k_size,
+    pre_openpose_extra,
+    hed_scribble,
+    pre_pidinet_safe,
+    pre_lineart_coarse,
+    use_cuda,
+):
+    if not image:
+        raise ValueError("To use this, simply upload an image.")
+
+    preprocessor.load(name, False)
+
+    params = dict(
+        image_resolution=resolution,
+        detect_resolution=processor_resolution,
+        low_threshold=low_threshold,
+        high_threshold=high_threshold,
+        thr_v=value_threshold,
+        thr_d=distance_threshold,
+        mode=recolor_mode,
+        gamma_correction=recolor_gamma_correction,
+        blur_sigma=blur_k_size,
+        hand_and_face=pre_openpose_extra,
+        scribble=hed_scribble,
+        safe=pre_pidinet_safe,
+        coarse=pre_lineart_coarse,
+    )
+
+    if use_cuda:
+        @spaces.GPU(duration=15)
+        def wrapped_func():
+            preprocessor.to("cuda")
+            return preprocessor(image, **params)
+        return wrapped_func()
+
+    return preprocessor(image, **params)
+
+
+def preprocessor_tab():
+    with gr.Row():
+        with gr.Column():
+            pre_image = gr.Image(label="Image", type="pil", sources=["upload"])
+            pre_options = gr.Dropdown(label="Preprocessor", choices=PREPROCESSOR_TASKS_LIST, value=PREPROCESSOR_TASKS_LIST[0])
+            pre_img_resolution = gr.Slider(
+                minimum=64, maximum=4096, step=64, value=1024, label="Image Resolution",
+                info="The maximum proportional size of the generated image based on the uploaded image."
+            )
+            pre_start = gr.Button(value="PROCESS IMAGE", variant="primary")
+            with gr.Accordion("Advanced Settings", open=False):
+                with gr.Column():
+                    pre_processor_resolution = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocessor Resolution")
+                    pre_low_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="'CANNY' low threshold")
+                    pre_high_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="'CANNY' high threshold")
+                    pre_value_threshold = gr.Slider(minimum=1, maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
+                    pre_distance_threshold = gr.Slider(minimum=1, maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
+                    pre_recolor_mode = gr.Dropdown(label="'RECOLOR' mode", choices=["luminance", "intensity"], value="luminance")
+                    pre_recolor_gamma_correction = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
+                    pre_blur_k_size = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'BLUR' sigma")
+                    pre_openpose_extra = gr.Checkbox(value=True, label="'OPENPOSE' face and hand")
+                    pre_hed_scribble = gr.Checkbox(value=False, label="'HED' scribble")
+                    pre_pidinet_safe = gr.Checkbox(value=False, label="'PIDINET' safe")
+                    pre_lineart_coarse = gr.Checkbox(value=False, label="'LINEART' coarse")
+                    pre_use_cuda = gr.Checkbox(value=False, label="Use CUDA")
+
+        with gr.Column():
+            pre_result = gr.Image(label="Result", type="pil", interactive=False, format="png")
+
+    pre_start.click(
+        fn=process_inputs,
+        inputs=[
+            pre_image,
+            pre_options,
+            pre_img_resolution,
+            pre_processor_resolution,
+            pre_low_threshold,
+            pre_high_threshold,
+            pre_value_threshold,
+            pre_distance_threshold,
+            pre_recolor_mode,
+            pre_recolor_gamma_correction,
+            pre_blur_k_size,
+            pre_openpose_extra,
+            pre_hed_scribble,
+            pre_pidinet_safe,
+            pre_lineart_coarse,
+            pre_use_cuda,
+        ],
+        outputs=[pre_result],
+    )
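The new module only defines the tab contents and its click handler. A minimal host sketch for trying it standalone; the surrounding Blocks app is assumed here and is not part of this file:

    # Assumed harness: mount preprocessor_tab() inside a Gradio Blocks UI.
    import gradio as gr
    from image_processor import preprocessor_tab

    with gr.Blocks() as demo:
        with gr.Tab("Preprocessor"):
            preprocessor_tab()

    if __name__ == "__main__":
        demo.launch()

The use_cuda path wraps the call in a function decorated with @spaces.GPU(duration=15) so that a ZeroGPU Space only holds a GPU allocation for the duration of the preprocessing call; otherwise the preprocessor runs on CPU.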
modutils.py CHANGED
@@ -302,6 +302,10 @@ def safe_float(input):
     return output
 
 
+def valid_model_name(model_name: str):
+    return model_name.split(" ")[0]
+
+
 def save_images(images: list[Image.Image], metadatas: list[str]):
     from PIL import PngImagePlugin
     import uuid
@@ -566,7 +570,8 @@ private_lora_model_list = get_private_lora_model_lists()
 
 def get_civitai_info(path):
     global civitai_not_exists_list
-    if path in set(civitai_not_exists_list): return ["", "", "", "", ""]
+    default = ["", "", "", "", ""]
+    if path in set(civitai_not_exists_list): return default
     if not Path(path).exists(): return None
     user_agent = get_user_agent()
     headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
@@ -584,12 +589,12 @@ def get_civitai_info(path):
         r = session.get(url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
     except Exception as e:
         print(e)
-        return ["", "", "", "", ""]
+        return default
     if not r.ok: return None
     json = r.json()
     if not 'baseModel' in json:
         civitai_not_exists_list.append(path)
-        return ["", "", "", "", ""]
+        return default
     items = []
     items.append(" / ".join(json['trainedWords']))
     items.append(json['baseModel'])
@@ -690,7 +695,7 @@ def copy_lora(path: str, new_path: str):
        return None
 
 
-def download_my_lora(dl_urls: str, lora1: str, lora2: str, lora3: str, lora4: str, lora5: str):
+def download_my_lora(dl_urls: str, lora1: str, lora2: str, lora3: str, lora4: str, lora5: str, lora6: str, lora7: str):
     path = download_lora(dl_urls)
     if path:
         if not lora1 or lora1 == "None":
@@ -703,9 +708,13 @@ def download_my_lora(dl_urls: str, lora1: str, lora2: str, lora3: str, lora4: st
             lora4 = path
         elif not lora5 or lora5 == "None":
             lora5 = path
+        #elif not lora6 or lora6 == "None":
+        #    lora6 = path
+        #elif not lora7 or lora7 == "None":
+        #    lora7 = path
     choices = get_all_lora_tupled_list()
     return gr.update(value=lora1, choices=choices), gr.update(value=lora2, choices=choices), gr.update(value=lora3, choices=choices),\
-        gr.update(value=lora4, choices=choices), gr.update(value=lora5, choices=choices)
+        gr.update(value=lora4, choices=choices), gr.update(value=lora5, choices=choices), gr.update(value=lora6, choices=choices), gr.update(value=lora7, choices=choices)
 
 
 def get_valid_lora_name(query: str, model_name: str):
@@ -745,25 +754,31 @@ def get_valid_lora_wt(prompt: str, lora_path: str, lora_wt: float):
     return wt
 
 
-def set_prompt_loras(prompt, prompt_syntax, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
-    if not "Classic" in str(prompt_syntax): return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
+def set_prompt_loras(prompt, prompt_syntax, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt):
+    if not "Classic" in str(prompt_syntax): return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt
     lora1 = get_valid_lora_name(lora1, model_name)
     lora2 = get_valid_lora_name(lora2, model_name)
     lora3 = get_valid_lora_name(lora3, model_name)
     lora4 = get_valid_lora_name(lora4, model_name)
     lora5 = get_valid_lora_name(lora5, model_name)
-    if not "<lora" in prompt: return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
+    #lora6 = get_valid_lora_name(lora6, model_name)
+    #lora7 = get_valid_lora_name(lora7, model_name)
+    if not "<lora" in prompt: return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt
     lora1_wt = get_valid_lora_wt(prompt, lora1, lora1_wt)
     lora2_wt = get_valid_lora_wt(prompt, lora2, lora2_wt)
     lora3_wt = get_valid_lora_wt(prompt, lora3, lora3_wt)
     lora4_wt = get_valid_lora_wt(prompt, lora4, lora4_wt)
     lora5_wt = get_valid_lora_wt(prompt, lora5, lora5_wt)
+    #lora6_wt = get_valid_lora_wt(prompt, lora6, lora6_wt)
+    #lora7_wt = get_valid_lora_wt(prompt, lora7, lora7_wt)
     on1, label1, tag1, md1 = get_lora_info(lora1)
     on2, label2, tag2, md2 = get_lora_info(lora2)
     on3, label3, tag3, md3 = get_lora_info(lora3)
     on4, label4, tag4, md4 = get_lora_info(lora4)
     on5, label5, tag5, md5 = get_lora_info(lora5)
-    lora_paths = [lora1, lora2, lora3, lora4, lora5]
+    #on6, label6, tag6, md6 = get_lora_info(lora6)
+    #on7, label7, tag7, md7 = get_lora_info(lora7)
+    lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
     prompts = prompt.split(",") if prompt else []
     for p in prompts:
         p = str(p).strip()
@@ -780,30 +795,40 @@ def set_prompt_loras(prompt, prompt_syntax, model_name, lora1, lora1_wt, lora2,
                 continue
             elif not on1:
                 lora1 = path
-                lora_paths = [lora1, lora2, lora3, lora4, lora5]
+                lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
                 lora1_wt = safe_float(wt)
                 on1 = True
             elif not on2:
                 lora2 = path
-                lora_paths = [lora1, lora2, lora3, lora4, lora5]
+                lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
                 lora2_wt = safe_float(wt)
                 on2 = True
             elif not on3:
                 lora3 = path
-                lora_paths = [lora1, lora2, lora3, lora4, lora5]
+                lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
                 lora3_wt = safe_float(wt)
                 on3 = True
             elif not on4:
                 lora4 = path
-                lora_paths = [lora1, lora2, lora3, lora4, lora5]
+                lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
                 lora4_wt = safe_float(wt)
                 on4 = True
             elif not on5:
                 lora5 = path
-                lora_paths = [lora1, lora2, lora3, lora4, lora5]
+                lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
                 lora5_wt = safe_float(wt)
                 on5 = True
+            #elif not on6:
+            #    lora6 = path
+            #    lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
+            #    lora6_wt = safe_float(wt)
+            #    on6 = True
+            #elif not on7:
+            #    lora7 = path
+            #    lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
+            #    lora7_wt = safe_float(wt)
+            #    on7 = True
-    return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
+    return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt
 
 
 def get_lora_info(lora_path: str):
@@ -864,13 +889,15 @@ def apply_lora_prompt(prompt: str = "", lora_info: str = ""):
     return gr.update(value=prompt)
 
 
-def update_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
+def update_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt):
     on1, label1, tag1, md1 = get_lora_info(lora1)
     on2, label2, tag2, md2 = get_lora_info(lora2)
     on3, label3, tag3, md3 = get_lora_info(lora3)
     on4, label4, tag4, md4 = get_lora_info(lora4)
     on5, label5, tag5, md5 = get_lora_info(lora5)
-    lora_paths = [lora1, lora2, lora3, lora4, lora5]
+    on6, label6, tag6, md6 = get_lora_info(lora6)
+    on7, label7, tag7, md7 = get_lora_info(lora7)
+    lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
 
     output_prompt = prompt
     if "Classic" in str(prompt_syntax):
@@ -895,6 +922,8 @@ def update_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3,
         if on3: lora_prompts.append(f"<lora:{to_lora_key(lora3)}:{lora3_wt:.2f}>")
         if on4: lora_prompts.append(f"<lora:{to_lora_key(lora4)}:{lora4_wt:.2f}>")
         if on5: lora_prompts.append(f"<lora:{to_lora_key(lora5)}:{lora5_wt:.2f}>")
+        #if on6: lora_prompts.append(f"<lora:{to_lora_key(lora6)}:{lora6_wt:.2f}>")
+        #if on7: lora_prompts.append(f"<lora:{to_lora_key(lora7)}:{lora7_wt:.2f}>")
         output_prompt = ", ".join(list_uniq(output_prompts + lora_prompts + [""]))
     choices = get_all_lora_tupled_list()
 
@@ -907,7 +936,11 @@ def update_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3,
         gr.update(value=lora4, choices=choices), gr.update(value=lora4_wt),\
         gr.update(value=tag4, label=label4, visible=on4), gr.update(visible=on4), gr.update(value=md4, visible=on4),\
         gr.update(value=lora5, choices=choices), gr.update(value=lora5_wt),\
-        gr.update(value=tag5, label=label5, visible=on5), gr.update(visible=on5), gr.update(value=md5, visible=on5)
+        gr.update(value=tag5, label=label5, visible=on5), gr.update(visible=on5), gr.update(value=md5, visible=on5),\
+        gr.update(value=lora6, choices=choices), gr.update(value=lora6_wt),\
+        gr.update(value=tag6, label=label6, visible=on6), gr.update(visible=on6), gr.update(value=md6, visible=on6),\
+        gr.update(value=lora7, choices=choices), gr.update(value=lora7_wt),\
+        gr.update(value=tag7, label=label7, visible=on7), gr.update(visible=on7), gr.update(value=md7, visible=on7)
 
 
 def get_my_lora(link_url, romanize):
@@ -926,7 +959,6 @@ def get_my_lora(link_url, romanize):
             path.resolve().rename(new_path.resolve())
             update_lora_dict(str(new_path))
             l_path = str(new_path)
-    new_lora_model_list = get_lora_model_list()
     new_lora_tupled_list = get_all_lora_tupled_list()
     msg_lora = "Downloaded"
     if l_name:
@@ -943,6 +975,10 @@ def get_my_lora(link_url, romanize):
         choices=new_lora_tupled_list
     ), gr.update(
         choices=new_lora_tupled_list
+    ), gr.update(
+        choices=new_lora_tupled_list
+    ), gr.update(
+        choices=new_lora_tupled_list
     ), gr.update(
         value=msg_lora
     )
@@ -975,12 +1011,19 @@ def move_file_lora(filepaths):
         choices=new_lora_tupled_list
     ), gr.update(
         choices=new_lora_tupled_list
+    ), gr.update(
+        choices=new_lora_tupled_list
+    ), gr.update(
+        choices=new_lora_tupled_list
     )
 
 
-CIVITAI_SORT = ["Highest Rated", "Most Downloaded", "Newest"]
+CIVITAI_SORT = ["Highest Rated", "Most Downloaded", "Most Liked", "Most Discussed", "Most Collected", "Most Buzz", "Newest"]
 CIVITAI_PERIOD = ["AllTime", "Year", "Month", "Week", "Day"]
-CIVITAI_BASEMODEL = ["Pony", "Illustrious", "SDXL 1.0", "SD 1.5", "Flux.1 D", "Flux.1 S"]
+CIVITAI_BASEMODEL = ["Pony", "Illustrious", "SDXL 1.0", "SD 1.5", "Flux.1 D", "Flux.1 S"]  # , "SD 3.5"
+CIVITAI_TYPE = ["Checkpoint", "TextualInversion", "Hypernetwork", "AestheticGradient", "LORA", "LoCon", "DoRA",
+                "Controlnet", "Upscaler", "MotionModule", "VAE", "Poses", "Wildcards", "Workflows", "Other"]
+CIVITAI_FILETYPE = ["Model", "VAE", "Config", "Training Data"]
 
 
 def get_civitai_info(path):
@@ -1025,6 +1068,7 @@ def search_lora_on_civitai(query: str, allow_model: list[str] = ["Pony", "SDXL 1
                            sort: str = "Highest Rated", period: str = "AllTime", tag: str = "", user: str = "", page: int = 1):
     user_agent = get_user_agent()
     headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
+    if CIVITAI_API_KEY: headers['Authorization'] = f'Bearer {CIVITAI_API_KEY}'
     base_url = 'https://civitai.com/api/v1/models'
     params = {'types': ['LORA'], 'sort': sort, 'period': period, 'limit': limit, 'page': int(page), 'nsfw': 'true'}
     if query: params["query"] = query
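For reference, the Authorization header added to search_lora_on_civitai amounts to the following standalone request; the values below are illustrative, and the real function additionally builds base-model filters and paginates:

    # Sketch of an authenticated Civitai model search (assumed standalone form).
    import os
    import requests

    CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
    headers = {'User-Agent': 'Mozilla/5.0', 'content-type': 'application/json'}
    if CIVITAI_API_KEY:
        # The key goes in directly after "Bearer "; no literal braces around it.
        headers['Authorization'] = f'Bearer {CIVITAI_API_KEY}'
    r = requests.get('https://civitai.com/api/v1/models',
                     params={'types': ['LORA'], 'sort': 'Highest Rated', 'limit': 20, 'nsfw': 'true'},
                     headers=headers, timeout=(3.0, 15))
    r.raise_for_status()
    items = r.json().get('items', [])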
requirements.txt CHANGED
@@ -1,4 +1,4 @@
-git+https://github.com/R3gm/stablepy.git@8edabb0 # -b refactor_sampler_fix
+git+https://github.com/R3gm/stablepy.git@a9fe2dc # -b refactor_sampler_fix
 torch==2.2.0
 numpy<2
 gdown
utils.py CHANGED
@@ -274,6 +274,10 @@ def get_my_lora(link_url, romanize):
         choices=new_lora_model_list
     ), gr.update(
         choices=new_lora_model_list
+    ), gr.update(
+        choices=new_lora_model_list
+    ), gr.update(
+        choices=new_lora_model_list
     ), gr.update(
         value=msg_lora
     )
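The extra ), gr.update( pairs here and in modutils.py are not cosmetic: a Gradio event handler must return exactly one update per component in its outputs list, so wiring two more LoRA dropdowns requires two more updates at every return site. A toy sketch of the pattern; the function name and counts are illustrative:

    import gradio as gr

    def refresh_lora_choices(choices: list[str]):
        # One gr.update per dropdown in the outputs list, plus one for the status box.
        n_slots = 7  # lora1..lora7 after this change; previously 5
        return tuple(gr.update(choices=choices) for _ in range(n_slots)) + (gr.update(value="Downloaded"),)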