SunderAli17 committed on
Commit ff88ce9 · verified · 1 Parent(s): 4ca17b8

Update app.py

Files changed (1)
  1. app.py +6 -6
app.py CHANGED
@@ -30,13 +30,8 @@ USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
 ENABLE_REFINER = os.getenv("ENABLE_REFINER", "0") == "1"
 
-def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-    return seed
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
 
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 if torch.cuda.is_available():
     vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
     pipe = DiffusionPipeline.from_pretrained("dataautogpt3/OpenDalleV1.1", vae=vae, torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
@@ -56,6 +51,11 @@ if torch.cuda.is_available():
     if ENABLE_REFINER:
         refiner.unet = torch.compile(refiner.unet, mode="reduce_overhead", fullgraph=True)
 
+def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
+    if randomize_seed:
+        seed = random.randint(0, MAX_SEED)
+    return seed
+
 @spaces.GPU(enable_queue=True)
 def infer(
     prompt: str,
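
The hunks above do not show the body of infer, so below is a minimal sketch of how the relocated randomize_seed_fn and the new device handle are typically wired together in a Diffusers app. The MAX_SEED value, the infer signature, the generator construction, and the commented-out pipe call are assumptions for illustration, not part of this commit.

import random

import torch

MAX_SEED = 2**32 - 1  # assumed upper bound; app.py defines its own MAX_SEED earlier

# Same device selection as the added line in this commit.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    # Swap the user-supplied seed for a random one when requested.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed


def infer(prompt: str, seed: int = 0, randomize_seed: bool = True) -> int:
    # Hypothetical wiring: resolve the seed first, then seed a generator on the
    # same device the pipeline runs on so results are reproducible.
    seed = randomize_seed_fn(seed, randomize_seed)
    generator = torch.Generator(device).manual_seed(seed)
    # image = pipe(prompt=prompt, generator=generator).images[0]  # assumed pipeline call
    return seed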