AlekseyCalvin committed
Commit a7b1a8b
1 Parent(s): e36567d

Update app.py

Files changed (1)
  1. app.py +10 -1
app.py CHANGED
@@ -17,14 +17,23 @@ import safetensors.torch
 from safetensors.torch import load_file
 from huggingface_hub import HfFileSystem, ModelCard
 from huggingface_hub import login, hf_hub_download
+hf_token = os.environ.get("HF_TOKEN")
+login(token=hf_token)
 
+cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
+os.environ["TRANSFORMERS_CACHE"] = cache_path
+os.environ["HF_HUB_CACHE"] = cache_path
+os.environ["HF_HOME"] = cache_path
+
+torch.set_float32_matmul_precision("medium")
 
 # Load LoRAs from JSON file
 with open('loras.json', 'r') as f:
     loras = json.load(f)
 
 # Initialize the base model
-dtype = torch.float16
+dtype = torch.bfloat16
+device = "cuda" if torch.cuda.is_available() else "cpu"
 base_model = "AlekseyCalvin/SilverAgePoets_FluxS_TestAlpha_Diffusers"
 pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to("cuda")
 #pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.float16).to("cuda")
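
For readers skimming the hunk, here is a minimal, self-contained sketch of how the affected setup section of app.py reads after this commit. The explicit imports at the top are assumptions added for completeness (the file evidently relies on os, json, os.path as path, torch, and diffusers elsewhere); the rest mirrors the diff above.

import os
import json
from os import path

import torch
from huggingface_hub import login
from diffusers import DiffusionPipeline

# Authenticate against the Hugging Face Hub with a token taken from the environment.
hf_token = os.environ.get("HF_TOKEN")
login(token=hf_token)

# Point the Hugging Face cache directories at a local "models" folder next to app.py.
cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
os.environ["TRANSFORMERS_CACHE"] = cache_path
os.environ["HF_HUB_CACHE"] = cache_path
os.environ["HF_HOME"] = cache_path

# Lower float32 matmul precision, allowing faster reduced-precision kernels on supported GPUs.
torch.set_float32_matmul_precision("medium")

# Load LoRAs from JSON file
with open('loras.json', 'r') as f:
    loras = json.load(f)

# Initialize the base model in bfloat16. The pipeline is still moved to "cuda"
# unconditionally, even though a `device` variable is now computed.
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
base_model = "AlekseyCalvin/SilverAgePoets_FluxS_TestAlpha_Diffusers"
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to("cuda")

Because .to("cuda") remains hard-coded, the script will still fail on CPU-only hosts despite the new device check.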