ovieyra21 committed on
Commit
88247aa
1 Parent(s): 2f24fa4

Update app.py

Files changed (1)
app.py +20 -27
app.py CHANGED
@@ -3,13 +3,14 @@ import numpy as np
 import random
 from diffusers import DiffusionPipeline
 import torch
+import spaces
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 if torch.cuda.is_available():
     torch.cuda.max_memory_allocated(device=device)
     pipe = DiffusionPipeline.from_pretrained("Yntec/DreamPhotoGASM", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
-    pipeline.load_lora_weights("ovieyra21/autotrain-begg7-ozit5")
+    pipe.load_lora_weights("ovieyra21/autotrain-begg7-ozit5")
     pipe.enable_xformers_memory_efficient_attention()
     pipe = pipe.to(device)
 else:
@@ -19,21 +20,21 @@ else:
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
+@spaces.GPU(duration=120)
 def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
-
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
     generator = torch.Generator().manual_seed(seed)
 
     image = pipe(
-        prompt = prompt,
-        negative_prompt = negative_prompt,
-        guidance_scale = guidance_scale,
-        num_inference_steps = num_inference_steps,
-        width = width,
-        height = height,
-        generator = generator
+        prompt=prompt,
+        negative_prompt=negative_prompt,
+        guidance_scale=guidance_scale,
+        num_inference_steps=num_inference_steps,
+        width=width,
+        height=height,
+        generator=generator
     ).images[0]
 
     return image
@@ -44,20 +45,16 @@ examples = [
     "A delicious ceviche cheesecake slice",
 ]
 
-css="""
+css = """
 #col-container {
     margin: 0 auto;
     max-width: 520px;
 }
 """
 
-if torch.cuda.is_available():
-    power_device = "GPU"
-else:
-    power_device = "CPU"
+power_device = "GPU" if torch.cuda.is_available() else "CPU"
 
 with gr.Blocks(css=css) as demo:
-
     with gr.Column(elem_id="col-container"):
         gr.Markdown(f"""
         # Text-to-Image Gradio Template
@@ -65,8 +62,7 @@ with gr.Blocks(css=css) as demo:
         """)
 
         with gr.Row():
-
-            prompt = gr.Text(
+            prompt = gr.Textbox(
                 label="Prompt",
                 show_label=False,
                 max_lines=1,
@@ -79,8 +75,7 @@ with gr.Blocks(css=css) as demo:
         result = gr.Image(label="Result", show_label=False)
 
         with gr.Accordion("Advanced Settings", open=False):
-
-            negative_prompt = gr.Text(
+            negative_prompt = gr.Textbox(
                 label="Negative prompt",
                 max_lines=1,
                 placeholder="Enter a negative prompt",
@@ -98,7 +93,6 @@ with gr.Blocks(css=css) as demo:
             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
 
             with gr.Row():
-
                 width = gr.Slider(
                     label="Width",
                     minimum=256,
@@ -116,7 +110,6 @@ with gr.Blocks(css=css) as demo:
                 )
 
            with gr.Row():
-
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
@@ -134,14 +127,14 @@ with gr.Blocks(css=css) as demo:
                )
 
        gr.Examples(
-            examples = examples,
-            inputs = [prompt]
+            examples=examples,
+            inputs=[prompt]
        )
 
        run_button.click(
-            fn = infer,
-            inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
-            outputs = [result]
+            fn=infer,
+            inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
+            outputs=[result]
        )
 
-demo.queue().launch()
+demo.queue().launch()
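
For context, the two substantive changes in this commit are the fix of the undefined name `pipeline` (the LoRA weights are now loaded onto `pipe`, the object actually created above) and the new `@spaces.GPU` decorator, which requests a ZeroGPU worker for each call when the app runs on Hugging Face Spaces. A minimal sketch of the corrected pattern in isolation, assuming the same model and LoRA repo IDs as the diff; the `generate` helper, its prompt, and the step count are illustrative, not part of the app:

import torch
import spaces  # ZeroGPU helper; available inside a Hugging Face Space
from diffusers import DiffusionPipeline

# Load the base model, then attach the LoRA to the *same* object ("pipe",
# not the undefined name "pipeline" that the old code referenced).
pipe = DiffusionPipeline.from_pretrained(
    "Yntec/DreamPhotoGASM",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
)
pipe.load_lora_weights("ovieyra21/autotrain-begg7-ozit5")
pipe = pipe.to("cuda")

@spaces.GPU(duration=120)  # hold a ZeroGPU slot for up to 120 s per call
def generate(prompt, steps=25):  # illustrative signature; infer() takes more
    return pipe(prompt=prompt, num_inference_steps=steps).images[0]

On ZeroGPU hardware the GPU is attached only while the decorated function executes, which is why the decorator sits on `infer` rather than on the module-level pipeline setup.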