kayfahaarukku committed
Commit 517b8d9 · verified · 1 Parent(s): 59cc8a2

Update app.py

Files changed (1)
  1. app.py +1 -4
app.py CHANGED
@@ -6,8 +6,6 @@ import gradio as gr
  import random
  import tqdm

- # HF_TOKEN = os.environ.get("HF_TOKEN") or True
-
  # Enable TQDM progress tracking
  tqdm.monitor_interval = 0

@@ -17,12 +15,11 @@ pipe = StableDiffusionXLPipeline.from_pretrained(
      torch_dtype=torch.float16,
      custom_pipeline="lpw_stable_diffusion_xl",
      use_safetensors=True,
-     # use_auth=HF_TOKEN,
  )
  pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

  # Function to generate an image
- @spaces.GPU(duration=120) # Adjust the duration as needed
+ @spaces.GPU() # Adjust the duration as needed
  def generate_image(prompt, negative_prompt, use_defaults, width, height, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
      pipe.to('cuda') # Move the model to GPU when the function is called
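
For reference, the affected region of app.py after this commit reads roughly as sketched below. The sketch is assembled only from the diff context above; the imports of spaces, torch, and diffusers, the model id passed to from_pretrained, and the body of generate_image beyond its first line are assumptions and are not shown in this diff.

import spaces   # assumed import providing the ZeroGPU decorator
import torch    # assumed import for torch.float16
import gradio as gr
import random
import tqdm
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler  # assumed import

# Enable TQDM progress tracking
tqdm.monitor_interval = 0

pipe = StableDiffusionXLPipeline.from_pretrained(
    "...",  # model id is not shown in the diff; placeholder only
    torch_dtype=torch.float16,
    custom_pipeline="lpw_stable_diffusion_xl",
    use_safetensors=True,
)
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

# Function to generate an image
@spaces.GPU()  # now uses the default ZeroGPU duration instead of the removed duration=120
def generate_image(prompt, negative_prompt, use_defaults, width, height,
                   guidance_scale, num_inference_steps, seed, randomize_seed,
                   progress=gr.Progress()):
    pipe.to('cuda')  # Move the model to GPU when the function is called
    ...              # remainder of the function is outside this diff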