John6666 committed
Commit 985aa63 · verified · 1 Parent(s): 42ec22d

Upload dc.py

Files changed (1):
  dc.py  +63 -5
dc.py CHANGED
@@ -544,8 +544,8 @@ class GuiSD:
 
         info_state = "COMPLETE"
 
-        #yield info_state, img, info_images
-        return info_state, img, info_images
+        yield info_state, img, info_images
+        #return info_state, img, info_images
 
 def dynamic_gpu_duration(func, duration, *args):
 
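Restoring `yield` (and commenting out `return`) makes `GuiSD.generate_pipeline` a generator again, so the Gradio event that calls it can stream intermediate status and images instead of delivering a single final payload. A minimal sketch of that streaming pattern, with hypothetical handler and component names rather than the Space's real ones:

```python
import gradio as gr

def render(prompt):
    # Hypothetical stand-in for the diffusion call; returns no image here.
    return None

def generate(prompt):
    # A generator handler: each `yield` pushes an update to the UI, which is
    # why the commit restores `yield` over `return` in generate_pipeline.
    yield "PROCESSING", None
    image = render(prompt)
    yield "COMPLETE", image  # final update for the (status, image) outputs

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="prompt")
    status = gr.Textbox(label="info_state")
    out = gr.Image()
    prompt.submit(generate, inputs=prompt, outputs=[status, out])
```

With a plain `return`, Gradio would receive only the one final value; the generator form is what enables the progress updates seen elsewhere in dc.py.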
@@ -614,8 +614,8 @@ def sd_gen_generate_pipeline(*args):
     start_time = time.time()
 
     # yield from sd_gen.generate_pipeline(*generation_args)
-    #yield from dynamic_gpu_duration(
-    return dynamic_gpu_duration(
+    yield from dynamic_gpu_duration(
+    #return dynamic_gpu_duration(
         sd_gen.generate_pipeline,
         gpu_duration_arg,
         *generation_args,
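Re-enabling `yield from` keeps `sd_gen_generate_pipeline` itself a generator and forwards every item the wrapped pipeline produces; `return dynamic_gpu_duration(...)` would instead hand the caller an unconsumed generator object. A simplified, self-contained sketch of the delegation (the real `dynamic_gpu_duration` in dc.py requests GPU time via `@spaces.GPU`; this stand-in only times the call):

```python
import time

def dynamic_gpu_duration(func, duration, *args):
    # Simplified stand-in: the real wrapper acquires `duration` seconds of
    # GPU time before delegating to `func`.
    start = time.time()
    yield from func(*args)  # forward every intermediate result to the caller
    print(f"done in {time.time() - start:.2f}s (budget: {duration}s)")

def generate_pipeline(steps):
    # Hypothetical stand-in for the streamed pipeline updates.
    for i in range(steps):
        yield f"PROCESSING step {i + 1}/{steps}"
    yield "COMPLETE"

def sd_gen_generate_pipeline(steps):
    # `yield from` keeps this a generator, so its caller can stream output.
    yield from dynamic_gpu_duration(generate_pipeline, 59, steps)

for update in sd_gen_generate_pipeline(3):
    print(update)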
@@ -672,7 +672,7 @@ from modutils import (safe_float, escape_lora_basename, to_lora_key, to_lora_pat
 
 
 #@spaces.GPU
-def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
+def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
           model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
           lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
           sampler = "Euler a", vae = None, translate=True, progress=gr.Progress(track_tqdm=True)):
@@ -730,6 +730,64 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
     return output_image
 
 
+async def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
+          model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
+          lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
+          sampler = "Euler a", vae = None, translate=True, progress=gr.Progress(track_tqdm=True)):
+    import PIL
+    import numpy as np
+    MAX_SEED = np.iinfo(np.int32).max
+
+    load_lora_cpu = False
+    verbose_info = False
+    gpu_duration = 59
+
+    images: list[tuple[PIL.Image.Image, str | None]] = []
+    info_state = info_images = ""
+    progress(0, desc="Preparing...")
+
+    if randomize_seed:
+        seed = random.randint(0, MAX_SEED)
+
+    generator = torch.Generator().manual_seed(seed).seed()
+
+    if translate:
+        prompt = translate_to_en(prompt)
+        negative_prompt = translate_to_en(prompt)
+
+    prompt, negative_prompt = insert_model_recom_prompt(prompt, negative_prompt, model_name)
+    progress(0.5, desc="Preparing...")
+    lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt = \
+        set_prompt_loras(prompt, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt)
+    lora1 = get_valid_lora_path(lora1)
+    lora2 = get_valid_lora_path(lora2)
+    lora3 = get_valid_lora_path(lora3)
+    lora4 = get_valid_lora_path(lora4)
+    lora5 = get_valid_lora_path(lora5)
+    progress(1, desc="Preparation completed. Starting inference...")
+
+    progress(0, desc="Loading model...")
+    await sd_gen.load_new_model(model_name, vae, TASK_MODEL_LIST[0])
+    progress(1, desc="Model loaded.")
+    progress(0, desc="Starting Inference...")
+    info_state, images, info_images = await sd_gen_generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
+        guidance_scale, True, generator, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
+        lora4, lora4_wt, lora5, lora5_wt, sampler,
+        height, width, model_name, vae, TASK_MODEL_LIST[0], None, "Canny", 512, 1024,
+        None, None, None, 0.35, 100, 200, 0.1, 0.1, 1.0, 0., 1., False, "Classic", None,
+        1.0, 100, 10, 30, 0.55, "Use same sampler", "", "",
+        False, True, 1, True, False, False, False, False, "./images", False, False, False, True, 1, 0.55,
+        False, False, False, True, False, "Use same sampler", False, "", "", 0.35, True, True, False, 4, 4, 32,
+        False, "", "", 0.35, True, True, False, 4, 4, 32,
+        True, None, None, "plus_face", "original", 0.7, None, None, "base", "style", 0.7, 0.0,
+        load_lora_cpu, verbose_info, gpu_duration
+    )
+    progress(1, desc="Inference completed.")
+    output_image = images[0][0] if images else None
+
+    return output_image
+
+
 #@spaces.GPU
 def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
           model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
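The added `async def infer` is a non-streaming wrapper around the same pipeline: it fixes seed, translation, and LoRA inputs, awaits the model load, runs one generation, and returns only the first image. Two reviewer caveats: `negative_prompt = translate_to_en(prompt)` translates the positive prompt (possibly unintended), and `await sd_gen_generate_pipeline(...)` assumes that call is awaitable even though this commit makes it a plain generator. A minimal sketch of the async-wrapper shape, with hypothetical stand-ins for the model load and generation calls:

```python
import asyncio

async def load_new_model(name):
    # Hypothetical stand-in for sd_gen.load_new_model(...).
    await asyncio.sleep(0.1)

async def generate(prompt, seed):
    # Hypothetical stand-in for the awaited pipeline call; mirrors the
    # (info_state, images, info_images) result unpacked in the diff.
    await asyncio.sleep(0.1)
    return "COMPLETE", [("image-0", None)], ""

async def infer(prompt, seed=0):
    # Awaiting the slow steps keeps the event loop free for other requests.
    await load_new_model("model-a")
    info_state, images, info_images = await generate(prompt, seed)
    # Same final selection as the diff: first image if any were produced.
    return images[0][0] if images else None

print(asyncio.run(infer("a cat")))
```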