BestWishYsh committed
Commit 9bc5a81 · verified · 1 Parent(s): a7cdd0c

Update app.py

Files changed (1): app.py (+13, -5)
app.py CHANGED
@@ -31,17 +31,21 @@ from models.eva_clip import create_model_and_transforms
 from models.eva_clip.constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
 from models.eva_clip.utils_qformer import resize_numpy_image_long
 
-device = "cuda" if torch.cuda.is_available() else "cpu"
 
-hf_hub_download(repo_id="ai-forever/Real-ESRGAN", filename="RealESRGAN_x4.pth", local_dir="model_real_esran")
-snapshot_download(repo_id="AlexWortega/RIFE", local_dir="model_rife")
-snapshot_download(repo_id="BestWishYsh/ConsisID-preview", local_dir="BestWishYsh/ConsisID-preview")
+model_path = "ckpts"
 
-model_path = "BestWishYsh/ConsisID-preview"
 lora_path = None
 lora_rank = 128
 dtype = torch.bfloat16
+device = "cuda" if torch.cuda.is_available() else "cpu"
 
+if not os.path.exists(model_path) or not os.path.exists(f"{model_path}/model_real_esran") or not os.path.exists(f"{model_path}/model_rife"):
+    hf_hub_download(repo_id="ai-forever/Real-ESRGAN", filename="RealESRGAN_x4.pth", local_dir=f"{model_path}/model_real_esran")
+    snapshot_download(repo_id="AlexWortega/RIFE", local_dir=f"{model_path}/model_rife")
+    snapshot_download(repo_id="BestWishYsh/ConsisID-preview", local_dir=f"{model_path}")
+else:
+    print(f"Model already exists in {model_path}, skipping download.")
+
 if os.path.exists(os.path.join(model_path, "transformer_ema")):
     subfolder = "transformer_ema"
 else:
@@ -110,8 +114,12 @@ if "variance_type" in pipe.scheduler.config:
 pipe.scheduler = CogVideoXDPMScheduler.from_config(pipe.scheduler.config, **scheduler_args)
 pipe.to(device)
 
+# Enable CPU offload for the model.
+# Turn this on if you don't have multiple GPUs or enough GPU memory (such as an H100); it will make inference slower and may also reduce quality.
 pipe.enable_model_cpu_offload()
 pipe.enable_sequential_cpu_offload()
+# pipe.vae.enable_slicing()
+# pipe.vae.enable_tiling()
 
 os.makedirs("./output", exist_ok=True)
 os.makedirs("./gradio_tmp", exist_ok=True)