nsfwalex committed
Commit 20c459f · verified · 1 Parent(s): a2921b1

Update inference_manager.py

Files changed (1): inference_manager.py +4 -4
inference_manager.py CHANGED
@@ -26,7 +26,7 @@ import cv2
 import re
 import gradio as gr
 from PIL import Image
-MAX_SEED = np.iinfo(np.int32).max
+MAX_SEED = 12211231#np.iinfo(np.int32).max
 #from onediffx import compile_pipe, save_pipe, load_pipe
 
 HF_TOKEN = os.getenv('HF_TOKEN')
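For context, a module-level constant like MAX_SEED is typically the upper bound for randomly drawn generator seeds; this hunk pins it to a fixed value instead of the int32 maximum. A minimal standalone sketch of that usage pattern (the randomize_seed helper is hypothetical, not part of this file):

    import random

    MAX_SEED = 12211231  # was np.iinfo(np.int32).max before this commit

    def randomize_seed(seed: int, randomize: bool) -> int:
        # Draw a fresh seed in [0, MAX_SEED] when requested; otherwise keep the given one.
        return random.randint(0, MAX_SEED) if randomize else seed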
@@ -509,7 +509,7 @@ class ModelManager:
 p = remove_child_related_content(p)
 prompt_str = cfg.get("prompt", "{prompt}").replace("{prompt}", p)
 generator = torch.Generator(model.base_model_pipeline.device).manual_seed(seed)
-print(f"generate: p={p}, np={np}, steps={steps}, guidance_scale={guidance_scale}, size={width},{height}, seed={seed}")
+print(f"generate: p={p}, np={negative_prompt}, steps={steps}, guidance_scale={guidance_scale}, size={width},{height}, seed={seed}")
 images = ip_model.generate(
     prompt=prompt_str,
     negative_prompt=negative_prompt,
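The old log line interpolated np, which in this module is the imported numpy package (see the np.iinfo usage above), not the negative prompt. A runnable illustration of the mix-up the + line fixes:

    import numpy as np

    negative_prompt = "blurry, lowres"
    print(f"np={np}")               # np=<module 'numpy' ...> -- the module, not a prompt
    print(f"np={negative_prompt}")  # np=blurry, lowres       -- what the log intended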
@@ -558,7 +558,7 @@ class ModelManager:
 lora_list = inference_params.get("loras", [])
 seed = inference_params.get("seed", 0)
 
-pipe = model.build_pipeline_with_lora(lora_list, sampler, lora_list)
+pipe = model.build_pipeline_with_lora(lora_list, sampler)
 
 start = time.time()
 pipe.to("cuda")
@@ -571,7 +571,7 @@ class ModelManager:
 p = remove_child_related_content(p)
 prompt_str = cfg.get("prompt", "{prompt}").replace("{prompt}", p)
 generator = torch.Generator(pipe.device).manual_seed(seed)
-print(f"generate: p={p}, np={np}, steps={steps}, guidance_scale={guidance_scale}, size={width},{height}, seed={seed}")
+print(f"generate: p={p}, np={negative_prompt}, steps={steps}, guidance_scale={guidance_scale}, size={width},{height}, seed={seed}")
 images = pipe(
     prompt=prompt_str,
     negative_prompt=negative_prompt,
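Both print hunks sit next to the same seeding pattern: a torch.Generator pinned to the pipeline's device and seeded explicitly, which is what makes generations reproducible. A standalone sketch of that pattern (the commented pipe call stands in for the real pipeline):

    import torch

    seed = 12345
    device = "cuda" if torch.cuda.is_available() else "cpu"
    generator = torch.Generator(device).manual_seed(seed)

    # Handing the generator to a diffusers-style pipeline reproduces outputs:
    # images = pipe(prompt=prompt_str, negative_prompt=negative_prompt,
    #               generator=generator).images
    print(torch.rand(3, generator=generator, device=device))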
 