John6666 committed
Commit b6e81c2
1 Parent(s): bf8874b

Upload 4 files

Files changed (3):
  1. dc.py +8 -62
  2. llmdolphin.py +5 -9
  3. modutils.py +19 -0
dc.py CHANGED
@@ -696,7 +696,7 @@ import random
 import json
 import shutil
 from modutils import (safe_float, escape_lora_basename, to_lora_key, to_lora_path,
-    get_local_model_list, get_private_lora_model_lists, get_valid_lora_name,
+    get_local_model_list, get_private_lora_model_lists, get_valid_lora_name, get_state, set_state,
     get_valid_lora_path, get_valid_lora_wt, get_lora_info, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL,
     normalize_prompt_list, get_civitai_info, search_lora_on_civitai, translate_to_en, get_t2i_model_info, get_civitai_tag, save_image_history)
 
@@ -761,62 +761,6 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
 
     return output_image
 
-#@spaces.GPU
-def __infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
-            model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
-            lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
-            sampler = "Euler a", vae = None, translate=True, progress=gr.Progress(track_tqdm=True)):
-    MAX_SEED = np.iinfo(np.int32).max
-
-    load_lora_cpu = False
-    verbose_info = False
-    gpu_duration = 59
-
-    images: list[tuple[PIL.Image.Image, str | None]] = []
-    info_state = info_images = ""
-    progress(0, desc="Preparing...")
-
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-
-    generator = torch.Generator().manual_seed(seed).seed()
-
-    if translate:
-        prompt = translate_to_en(prompt)
-        negative_prompt = translate_to_en(prompt)
-
-    prompt, negative_prompt = insert_model_recom_prompt(prompt, negative_prompt, model_name)
-    progress(0.5, desc="Preparing...")
-    lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt = \
-        set_prompt_loras(prompt, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt)
-    lora1 = get_valid_lora_path(lora1)
-    lora2 = get_valid_lora_path(lora2)
-    lora3 = get_valid_lora_path(lora3)
-    lora4 = get_valid_lora_path(lora4)
-    lora5 = get_valid_lora_path(lora5)
-    progress(1, desc="Preparation completed. Starting inference...")
-
-    progress(0, desc="Loading model...")
-    sd_gen.load_new_model(model_name, vae, TASK_MODEL_LIST[0])
-    progress(1, desc="Model loaded.")
-    progress(0, desc="Starting Inference...")
-    info_state, images, info_images = sd_gen_generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
-        guidance_scale, True, generator, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
-        lora4, lora4_wt, lora5, lora5_wt, sampler,
-        height, width, model_name, vae, TASK_MODEL_LIST[0], None, "Canny", 512, 1024,
-        None, None, None, 0.35, 100, 200, 0.1, 0.1, 1.0, 0., 1., False, "Classic", None,
-        1.0, 100, 10, 30, 0.55, "Use same sampler", "", "",
-        False, True, 1, True, False, False, False, False, "./images", False, False, False, True, 1, 0.55,
-        False, False, False, True, False, "Use same sampler", False, "", "", 0.35, True, True, False, 4, 4, 32,
-        False, "", "", 0.35, True, True, False, 4, 4, 32,
-        True, None, None, "plus_face", "original", 0.7, None, None, "base", "style", 0.7, 0.0,
-        load_lora_cpu, verbose_info, gpu_duration
-    )
-    progress(1, desc="Inference completed.")
-    output_image = images[0][0] if images else None
-
-    return output_image
-
 
 #@spaces.GPU
 def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
@@ -843,17 +787,18 @@ def get_vaes():
     return vae_model_list
 
 
-show_diffusers_model_list_detail = False
+#show_diffusers_model_list_detail = False
 cached_diffusers_model_tupled_list = get_tupled_model_list(load_diffusers_format_model)
-def get_diffusers_model_list():
+def get_diffusers_model_list(state: dict = {}):
+    show_diffusers_model_list_detail = get_state(state, "show_diffusers_model_list_detail")
     if show_diffusers_model_list_detail:
         return cached_diffusers_model_tupled_list
     else:
         return load_diffusers_format_model
 
 
-def enable_diffusers_model_detail(is_enable: bool = False, model_name: str = ""):
-    global show_diffusers_model_list_detail
+def enable_diffusers_model_detail(is_enable: bool = False, model_name: str = "", state: dict = {}):
+    # global show_diffusers_model_list_detail
     show_diffusers_model_list_detail = is_enable
     new_value = model_name
     index = 0
@@ -863,7 +808,8 @@ def enable_diffusers_model_detail(is_enable: bool = False, model_name: str = "")
        new_value = cached_diffusers_model_tupled_list[index][1]
     else:
        new_value = load_diffusers_format_model[index]
-    return gr.update(value=is_enable), gr.update(value=new_value, choices=get_diffusers_model_list())
+    set_state(state, "show_diffusers_model_list_detail", show_diffusers_model_list_detail)
+    return gr.update(value=is_enable), gr.update(value=new_value, choices=get_diffusers_model_list()), state
 
 
 def load_model_prompt_dict():
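
The dc.py changes replace the module-level show_diffusers_model_list_detail flag with a per-session state dict that is read through get_state and written back through set_state, and enable_diffusers_model_detail now returns that dict as a third output. Below is a minimal wiring sketch, assuming a Gradio Blocks UI and that dc.py is importable as a module; the component names (detail_checkbox, model_dropdown) are hypothetical and not from the repo.

# Hypothetical wiring sketch for the state-passing functions changed above.
import gradio as gr
from dc import enable_diffusers_model_detail, get_diffusers_model_list

with gr.Blocks() as demo:
    # Per-session dict that replaces the old module-level flag.
    state = gr.State(value={})
    detail_checkbox = gr.Checkbox(label="Show detailed model list", value=False)
    model_dropdown = gr.Dropdown(choices=get_diffusers_model_list(), label="Model")

    # enable_diffusers_model_detail returns (checkbox update, dropdown update, state),
    # so the state component appears in both inputs and outputs.
    detail_checkbox.change(
        enable_diffusers_model_detail,
        inputs=[detail_checkbox, model_dropdown, state],
        outputs=[detail_checkbox, model_dropdown, state],
    )

demo.launch()

Routing the dict through inputs and outputs is what keeps the setting per-session instead of shared across all users of the Space.
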
llmdolphin.py CHANGED
@@ -1434,12 +1434,10 @@ def dolphin_respond_auto(
     state: dict = {},
     progress=gr.Progress(track_tqdm=True),
 ):
-    #try:
+    try:
         #if not is_japanese(message): return [(None, None)]
         progress(0, desc="Processing...")
 
-        print(state)#
-
         override_llm_format = get_state(state, "override_llm_format")
         if override_llm_format: chat_template = override_llm_format
         else: chat_template = llm_models[model][1]
@@ -1498,21 +1496,19 @@ def dolphin_respond_auto(
         for output in stream:
             outputs += output
             yield [(outputs, None)], gr.update(), gr.update()
-    #except Exception as e:
-    #    print(e)
-    #    yield [("", None)], gr.update(), gr.update()
+    except Exception as e:
+        print(e)
+        yield [("", None)], gr.update(), gr.update()
 
 
 def dolphin_parse_simple(
     message: str,
     history: list[tuple[str, str]],
-    state: dict = {},
+    state: dict,
 ):
     try:
-        print(state)#
         #if not is_japanese(message): return message
         dolphin_sysprompt_mode = get_state(state, "dolphin_sysprompt_mode")
-        print(dolphin_sysprompt_mode)#
         if dolphin_sysprompt_mode == "Chat with LLM" or not history or len(history) < 1: return message
         msg = history[-1][0]
         raw_prompt = get_raw_prompt(msg)
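
The llmdolphin.py change re-enables the previously commented-out try/except around the streaming body of dolphin_respond_auto, so a failure during generation prints the error and yields an empty chat turn instead of crashing the handler, and it drops the debug print(state) calls; dolphin_parse_simple also now takes the session state as a required argument. The following is a minimal sketch of the same generator error-handling pattern, with illustrative names (stream_tokens, respond) that are not from the repo.

# Hypothetical sketch of a streaming handler that degrades gracefully on errors.
import gradio as gr

def stream_tokens(message: str):
    # Stand-in for the LLM token stream used in the real handler.
    for word in message.split():
        yield word + " "

def respond(message, history, state: dict = {}):
    try:
        outputs = ""
        for token in stream_tokens(message):
            outputs += token
            # Yield the partial chat turn so the UI updates as tokens arrive.
            yield [(outputs, None)], gr.update(), gr.update()
    except Exception as e:
        # Matches the commit's fallback: log the error, return an empty turn.
        print(e)
        yield [("", None)], gr.update(), gr.update()
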
modutils.py CHANGED
@@ -52,6 +52,25 @@ def is_repo_name(s):
     return re.fullmatch(r'^[^/]+?/[^/]+?$', s)
 
 
+DEFAULT_STATE = {
+    "show_diffusers_model_list_detail": False,
+}
+
+
+def get_state(state: dict, key: str):
+    if key in state.keys(): return state[key]
+    elif key in DEFAULT_STATE.keys():
+        print(f"State '{key}' not found. Using default value.")
+        return DEFAULT_STATE[key]
+    else:
+        print(f"State '{key}' not found.")
+        return None
+
+
+def set_state(state: dict, key: str, value: Any):
+    state[key] = value
+
+
 translator = Translator()
 def translate_to_en(input: str):
     try:
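
The new modutils.py helpers keep per-session settings in a plain dict and fall back to DEFAULT_STATE when a key has not been set yet. A short usage sketch, assuming only the definitions added above:

# Usage sketch for the new state helpers.
from modutils import get_state, set_state

session = {}  # in the app this dict is typically held in a gr.State component

# Unset key that exists in DEFAULT_STATE: prints a notice and returns False.
print(get_state(session, "show_diffusers_model_list_detail"))

# Write the value into the session dict, then read it back.
set_state(session, "show_diffusers_model_list_detail", True)
print(get_state(session, "show_diffusers_model_list_detail"))  # True

# Key unknown to both the session and DEFAULT_STATE: prints a notice, returns None.
print(get_state(session, "unknown_key"))
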