AlekseyCalvin committed (verified)
Commit 18bd703 · Parent: cb4a9fb

Update app.py

Files changed (1):
  1. app.py +20 -18
app.py CHANGED
@@ -23,10 +23,10 @@ import warnings
 import safetensors.torch
 
 
-#cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
-#os.environ["TRANSFORMERS_CACHE"] = cache_path
-#os.environ["HF_HUB_CACHE"] = cache_path
-#os.environ["HF_HOME"] = cache_path
+cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
+os.environ["TRANSFORMERS_CACHE"] = cache_path
+os.environ["HF_HUB_CACHE"] = cache_path
+os.environ["HF_HOME"] = cache_path
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
@@ -46,23 +46,25 @@ dtype = torch.bfloat16
 pipe = FluxWithCFGPipeline.from_pretrained("ostris/OpenFLUX.1", torch_dtype=dtype).to("cuda")
 pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to("cuda")
 #pipe.num_single_layers="0"
-pipe.transformer_chunk_size="0"
+#pipe.transformer_chunk_size="0"
 #model.pooled_projections="(_, 1)[0]"
-pipe.transformer_pooled_projections_dim="(batch_size, 0)"
+#pipe.transformer_pooled_projections_dim="(batch_size, 0)"
 
 pipe.to("cuda")
-#clipmodel = 'norm'
-#if clipmodel == "long":
-# model_id = "zer0int/LongCLIP-GmP-ViT-L-14"
-# config = CLIPConfig.from_pretrained(model_id)
-#if clipmodel == "norm":
-# model_id = "zer0int/CLIP-GmP-ViT-L-14"
-# config = CLIPConfig.from_pretrained(model_id)
-#clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=False).to("cuda")
-#clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", ignore_mismatched_sizes=False, return_tensors="pt", truncation=True)
-#pipe.tokenizer = clip_processor.tokenizer
-#pipe.text_encoder = clip_model.text_model
-#pipe.text_encoder.dtype = torch.bfloat16
+clipmodel = 'norm'
+if clipmodel == "long":
+    model_id = "zer0int/LongCLIP-GmP-ViT-L-14"
+    config = CLIPConfig.from_pretrained(model_id)
+    maxtokens = 77
+if clipmodel == "norm":
+    model_id = "zer0int/CLIP-GmP-ViT-L-14"
+    config = CLIPConfig.from_pretrained(model_id)
+    maxtokens = 77
+clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True).to("cuda")
+clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=maxtokens, ignore_mismatched_sizes=True, return_tensors="pt", truncation=True)
+pipe.tokenizer = clip_processor.tokenizer
+pipe.text_encoder = clip_model.text_model
+pipe.text_encoder.dtype = torch.bfloat16
 torch.cuda.empty_cache()
 
 MAX_SEED = 2**32-1
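
A note on the newly un-commented cache block: HF_HOME, HF_HUB_CACHE, and TRANSFORMERS_CACHE are typically read once, when transformers/huggingface_hub are first imported, so setting them after those imports (the hunk sits below import safetensors.torch, and transformers is presumably imported earlier for CLIPConfig) may have no effect on where weights land. A minimal sketch of the safe ordering, using only the standard library plus the "models" directory name from the commit:

import os
from os import path

# Route every Hugging Face cache into a local "models" folder next to this
# script. This must happen BEFORE importing transformers, diffusers, or
# huggingface_hub, since they resolve these variables at import time.
cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
os.makedirs(cache_path, exist_ok=True)
os.environ["HF_HOME"] = cache_path             # umbrella cache root
os.environ["HF_HUB_CACHE"] = cache_path        # hub downloads
os.environ["TRANSFORMERS_CACHE"] = cache_path  # legacy name, older transformers

import torch                        # noqa: E402 -- late imports are deliberate
from transformers import CLIPModel  # noqa: E402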
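
The core of the commit re-enables the CLIP text-encoder swap. Two details stand out: maxtokens = 77 is set in both branches, even though LongCLIP variants typically advertise a longer (248-token) context, so the "long" branch is effectively truncated to the stock CLIP limit; and pipe.text_encoder.dtype is assigned by hand because clip_model.text_model is a bare CLIPTextTransformer module without the .dtype property that pretrained models expose and that pipelines read. A minimal sketch of the same swap against a stock diffusers FluxPipeline (the base checkpoint black-forest-labs/FLUX.1-schnell is an assumption here; the commit uses the custom FluxWithCFGPipeline over ostris/OpenFLUX.1):

import torch
from diffusers import FluxPipeline
from transformers import CLIPModel, CLIPProcessor

clip_id = "zer0int/CLIP-GmP-ViT-L-14"  # fine-tuned ViT-L/14 from the commit

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
).to("cuda")

# Load the replacement CLIP once; .text_model is the inner text transformer
# that Flux-style pipelines query for the pooled prompt embedding.
clip_model = CLIPModel.from_pretrained(clip_id, torch_dtype=torch.bfloat16).to("cuda")
clip_processor = CLIPProcessor.from_pretrained(clip_id)

pipe.tokenizer = clip_processor.tokenizer
pipe.text_encoder = clip_model.text_model
# Bare nn.Module: restore the .dtype attribute the pipeline reads.
pipe.text_encoder.dtype = torch.bfloat16

torch.cuda.empty_cache()

A quick smoke test of the swapped pipeline: image = pipe("a photo of a cat", num_inference_steps=4).images[0] (four steps is the usual schnell setting; OpenFLUX.1 may want more).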