cocktailpeanut committed
Commit 3c6eab5 · 1 Parent(s): 7d6f42b
Files changed (1)
  1. app.py +4 -2
app.py CHANGED
@@ -5,6 +5,7 @@ import random
 import torch
 import devicetorch
 from diffusers import DiffusionPipeline
+import os
 
 
 # Quant
@@ -46,12 +47,13 @@ def init():
     bfl_repo = "cocktailpeanut/xulf-s"
     te_repo = "comfyanonymous/flux_text_encoders"
 
+
     scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(bfl_repo, subfolder="scheduler")
     #text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
-    text_encoder = CLIPTextModel.from_pretrained("./flux_text_encoders/clip_l.safetensors", torch_dtype=dtype)
+    text_encoder = CLIPTextModel.from_pretrained(os.path.join(os.getcwd(), "flux_text_encoders/clip_l.safetensors"), torch_dtype=dtype)
     tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
     #text_encoder_2 = T5EncoderModel.from_pretrained(bfl_repo, subfolder="text_encoder_2", torch_dtype=dtype)
-    text_encoder_2 = T5EncoderModel.from_pretrained("./flux_text_encoders/t5xxl_fp8_e4m3fn.safetensors", torch_dtype=dtype)
+    text_encoder_2 = T5EncoderModel.from_pretrained(os.path.join(os.getcwd(), "flux_text_encoders/t5xxl_fp8_e4m3fn.safetensors"), torch_dtype=dtype)
     tokenizer_2 = T5TokenizerFast.from_pretrained(bfl_repo, subfolder="tokenizer_2", torch_dtype=dtype)
     vae = AutoencoderKL.from_pretrained(bfl_repo, subfolder="vae", torch_dtype=dtype)
     transformer = FluxTransformer2DModel.from_pretrained(bfl_repo, subfolder="transformer", torch_dtype=dtype)
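
The commit imports os and switches the two local text-encoder checkpoint paths from "./"-relative strings to absolute paths built from the current working directory. A minimal sketch of that path construction, assuming the flux_text_encoders files sit next to app.py in the working directory (only the directory and file names come from the diff; the existence check below is illustrative, not part of the commit):

import os

# Build absolute paths to the locally downloaded text-encoder weights,
# mirroring what the patched init() does with os.path.join(os.getcwd(), ...).
clip_path = os.path.join(os.getcwd(), "flux_text_encoders/clip_l.safetensors")
t5_path = os.path.join(os.getcwd(), "flux_text_encoders/t5xxl_fp8_e4m3fn.safetensors")

for path in (clip_path, t5_path):
    # Fail early with a clear message if the checkpoints are not where
    # the working directory suggests they should be.
    if not os.path.isfile(path):
        raise FileNotFoundError(f"expected text-encoder weights at {path}")

print(clip_path)
print(t5_path)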