bunarivenna committed
Commit df70159
1 Parent(s): c725615

Update app.py

Files changed (1)
  1. app.py +14 -2
app.py CHANGED
@@ -28,7 +28,19 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
 MAX_SEED = np.iinfo(np.int32).max
 
 if not torch.cuda.is_available():
-    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo may not work on CPU.</p>"
+    DESCRIPTION += "\n<p>Running on CPU 🥶</p>"
+    pipe = StableDiffusionXLPipeline.from_pretrained(
+        "fluently/Fluently-XL-Final",
+        torch_dtype=torch.float16,
+        use_safetensors=True,
+    )
+    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+
+
+    pipe.load_lora_weights("ehristoforu/dalle-3-xl-v2", weight_name="dalle-3-xl-lora-v2.safetensors", adapter_name="dalle")
+    pipe.set_adapters("dalle")
+
+    pipe.enable_model_cpu_offload()
 
 MAX_SEED = np.iinfo(np.int32).max
 
@@ -48,7 +60,7 @@ if torch.cuda.is_available():
     pipe.load_lora_weights("ehristoforu/dalle-3-xl-v2", weight_name="dalle-3-xl-lora-v2.safetensors", adapter_name="dalle")
     pipe.set_adapters("dalle")
 
-    pipe.enable_model_cpu_offload()
+    pipe.to("cuda")
 
 
 
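For readers who don't want to reassemble the diff by hand, below is a minimal sketch (not part of the commit) of how the pipeline setup in app.py reads after this change. The imports are made explicit here, and the build_pipe helper plus the DESCRIPTION placeholder are introduced purely for illustration; the actual file defines DESCRIPTION earlier and repeats the construction code inside each branch.

# Sketch of app.py's model setup after commit df70159 (imports assumed from the top of the file).
import numpy as np
import torch
from diffusers import EulerAncestralDiscreteScheduler, StableDiffusionXLPipeline

DESCRIPTION = "..."  # placeholder; the real description string is defined earlier in app.py
MAX_SEED = np.iinfo(np.int32).max


def build_pipe() -> StableDiffusionXLPipeline:
    # Illustrative helper: both branches build the same base model, scheduler,
    # and LoRA adapter; the real file simply repeats these lines in each branch.
    pipe = StableDiffusionXLPipeline.from_pretrained(
        "fluently/Fluently-XL-Final",
        torch_dtype=torch.float16,
        use_safetensors=True,
    )
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    pipe.load_lora_weights(
        "ehristoforu/dalle-3-xl-v2",
        weight_name="dalle-3-xl-lora-v2.safetensors",
        adapter_name="dalle",
    )
    pipe.set_adapters("dalle")
    return pipe


if torch.cuda.is_available():
    pipe = build_pipe()
    pipe.to("cuda")  # new in this commit: keep the whole pipeline resident on the GPU
else:
    DESCRIPTION += "\n<p>Running on CPU 🥶</p>"
    pipe = build_pipe()
    pipe.enable_model_cpu_offload()  # as added in this commit; requires accelerate

The net effect is that the Space now builds the pipeline even when CUDA is unavailable instead of only appending a warning, while the CUDA branch switches from enable_model_cpu_offload() (which moves submodules to the device on demand) to a plain pipe.to("cuda") that keeps the whole pipeline on the GPU.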