John6666 committed on
Commit
edbedaa
·
verified ·
1 Parent(s): 5fac251

Upload dc.py

Browse files
Files changed (1) hide show
  1. dc.py +62 -0
dc.py CHANGED
@@ -671,6 +671,7 @@ from modutils import (safe_float, escape_lora_basename, to_lora_key, to_lora_pat
671
  normalize_prompt_list, get_civitai_info, search_lora_on_civitai, translate_to_en, get_t2i_model_info, get_civitai_tag, save_image_history)
672
 
673
 
 
674
  #@spaces.GPU
675
  def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
676
  model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
@@ -708,6 +709,67 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
708
  lora5 = get_valid_lora_path(lora5)
709
  progress(1, desc="Preparation completed. Starting inference...")
710
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
711
  progress(0, desc="Loading model...")
712
  sd_gen.load_new_model(model_name, vae, TASK_MODEL_LIST[0])
713
  progress(1, desc="Model loaded.")
 
671
  normalize_prompt_list, get_civitai_info, search_lora_on_civitai, translate_to_en, get_t2i_model_info, get_civitai_tag, save_image_history)
672
 
673
 
674
+
675
  #@spaces.GPU
676
  def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
677
  model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
 
709
  lora5 = get_valid_lora_path(lora5)
710
  progress(1, desc="Preparation completed. Starting inference...")
711
 
712
+ progress(0, desc="Loading model...")
713
+ for _ in sd_gen.load_new_model(model_name, vae, TASK_MODEL_LIST[0]):
714
+ pass
715
+ progress(1, desc="Model loaded.")
716
+ progress(0, desc="Starting Inference...")
717
+ images = None
718
+ for info_state, stream_images, info_images in sd_gen_generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
719
+ guidance_scale, True, generator, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
720
+ lora4, lora4_wt, lora5, lora5_wt, sampler,
721
+ height, width, model_name, vae, TASK_MODEL_LIST[0], None, "Canny", 512, 1024,
722
+ None, None, None, 0.35, 100, 200, 0.1, 0.1, 1.0, 0., 1., False, "Classic", None,
723
+ 1.0, 100, 10, 30, 0.55, "Use same sampler", "", "",
724
+ False, True, 1, True, False, False, False, False, "./images", False, False, False, True, 1, 0.55,
725
+ False, False, False, True, False, "Use same sampler", False, "", "", 0.35, True, True, False, 4, 4, 32,
726
+ False, "", "", 0.35, True, True, False, 4, 4, 32,
727
+ True, None, None, "plus_face", "original", 0.7, None, None, "base", "style", 0.7, 0.0,
728
+ load_lora_cpu, verbose_info, gpu_duration
729
+ ):
730
+ images = stream_images
731
+ progress(1, desc="Inference completed.")
732
+ output_image = images[0][0] if images else None
733
+
734
+ return output_image
735
+
736
+ #@spaces.GPU
737
+ def __infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
738
+ model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
739
+ lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
740
+ sampler = "Euler a", vae = None, translate=True, progress=gr.Progress(track_tqdm=True)):
741
+ import PIL
742
+ import numpy as np
743
+ MAX_SEED = np.iinfo(np.int32).max
744
+
745
+ load_lora_cpu = False
746
+ verbose_info = False
747
+ gpu_duration = 59
748
+
749
+ images: list[tuple[PIL.Image.Image, str | None]] = []
750
+ info_state = info_images = ""
751
+ progress(0, desc="Preparing...")
752
+
753
+ if randomize_seed:
754
+ seed = random.randint(0, MAX_SEED)
755
+
756
+ generator = torch.Generator().manual_seed(seed).seed()
757
+
758
+ if translate:
759
+ prompt = translate_to_en(prompt)
760
+ negative_prompt = translate_to_en(prompt)
761
+
762
+ prompt, negative_prompt = insert_model_recom_prompt(prompt, negative_prompt, model_name)
763
+ progress(0.5, desc="Preparing...")
764
+ lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt = \
765
+ set_prompt_loras(prompt, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt)
766
+ lora1 = get_valid_lora_path(lora1)
767
+ lora2 = get_valid_lora_path(lora2)
768
+ lora3 = get_valid_lora_path(lora3)
769
+ lora4 = get_valid_lora_path(lora4)
770
+ lora5 = get_valid_lora_path(lora5)
771
+ progress(1, desc="Preparation completed. Starting inference...")
772
+
773
  progress(0, desc="Loading model...")
774
  sd_gen.load_new_model(model_name, vae, TASK_MODEL_LIST[0])
775
  progress(1, desc="Model loaded.")