SkyeBrowse Developer committed
Commit 56cd3fd
Parent: c862e7a

local and empty cache

Files changed (2):
  1. anime_app.py +1 -19
  2. local_anime_app.py +120 -118
anime_app.py CHANGED
@@ -284,22 +284,6 @@ with gr.Blocks("bethecloud/storj_theme", css=css) as demo:
     def auto_process_image(image, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
         return process_image(image, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed)
 
-    # # AI Image Processing
-    # @gr.on(triggers=[use_ai_button.click], inputs=config, outputs=result, show_progress="minimal")
-    # def submit(image, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
-    #     return process_image(image, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed)
-
-    # # Change input to result
-    # @gr.on(triggers=[use_ai_button.click], inputs=None, outputs=image, show_progress="hidden")
-    # def update_input():
-    #     try:
-    #         print("Updating image to AI Temp Image")
-    #         ai_temp_image = Image.open("temp_image.jpg")
-    #         return ai_temp_image
-    #     except FileNotFoundError:
-    #         print("No AI Image Available")
-    #         return None
-
     @gr.on(triggers=[use_ai_button.click], inputs=[result] + config, outputs=[image, result], show_progress="minimal")
     def submit(previous_result, image, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
         # First, yield the previous result to update the input image immediately
@@ -362,10 +346,8 @@ def process_image(
         generator=generator,
         image=control_image,
     ).images[0]
-    # torch.cuda.synchronize()
-    # torch.cuda.empty_cache()
     print(f"\n-------------------------Inference done in: {time.time() - start:.2f} seconds-------------------------")
-    # results.save("temp_image.jpg")
+    torch.cuda.empty_cache()
    return results
 
 if prod:
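The one functional change in this file is the torch.cuda.empty_cache() call after inference, replacing the old temp-file handoff (results.save("temp_image.jpg")). A minimal sketch of the pattern, assuming an arbitrary diffusers pipeline already resident on the GPU (pipe and the prompt are placeholders, not this app's config):

import torch

@torch.inference_mode()  # same no-grad context the app's process_image runs in
def generate(pipe, prompt: str):
    image = pipe(prompt=prompt).images[0]
    # Return unused blocks held by PyTorch's caching allocator to the driver.
    # Live tensors (pipeline weights, the returned image) are unaffected, so
    # this only lowers *reserved* VRAM, not memory in active use.
    torch.cuda.empty_cache()
    return image

Emptying the cache on every request trades a little re-allocation overhead for a smaller resident footprint between requests, which mainly matters when other processes share the GPU.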
 
local_anime_app.py CHANGED
@@ -1,7 +1,8 @@
-prod = True
+prod = False
+port = 8080
 show_options = False
 if prod:
-    port = 8082
+    port = 8081
     # show_options = False
 
 import os
@@ -10,81 +11,116 @@ import random
 import time
 import gradio as gr
 import numpy as np
-import imageio
+# import imageio
 from huggingface_hub import HfApi
 import torch
+# import spaces
 from PIL import Image
 from diffusers import (
     ControlNetModel,
     DPMSolverMultistepScheduler,
     StableDiffusionControlNetPipeline,
+    # AutoencoderKL,
 )
+from controlnet_aux_local import NormalBaeDetector
+# from controlnet_aux import NormalBaeDetector
 from diffusers.models.attention_processor import AttnProcessor2_0
 MAX_SEED = np.iinfo(np.int32).max
 API_KEY = os.environ.get("API_KEY", None)
 
 print("CUDA version:", torch.version.cuda)
-print("loading pipe")
+print("loading everything")
 compiled = False
-from preprocess import Preprocessor
-preprocessor = Preprocessor()
 api = HfApi()
 
-if gr.NO_RELOAD:
-    torch.cuda.max_memory_allocated(device="cuda")
-    # Controlnet Normal
-    model_id = "lllyasviel/control_v11p_sd15_normalbae"
-    print("initializing controlnet")
-    controlnet = ControlNetModel.from_pretrained(
-        model_id,
-        torch_dtype=torch.float16,
-        attn_implementation="flash_attention_2",
-    ).to("cuda")
-
-    # Scheduler
-    scheduler = DPMSolverMultistepScheduler.from_pretrained(
-        "runwayml/stable-diffusion-v1-5",
-        solver_order=2,
-        subfolder="scheduler",
-        use_karras_sigmas=True,
-        final_sigmas_type="sigma_min",
-        algorithm_type="sde-dpmsolver++",
-        prediction_type="epsilon",
-        thresholding=False,
-        denoise_final=True,
-        device_map="cuda",
-    )
-
-    # Stable Diffusion Pipeline URL
-    base_model_url = "https://huggingface.co/broyang/hentaidigitalart_v20/blob/main/realcartoon3d_v15.safetensors"
-
-    pipe = StableDiffusionControlNetPipeline.from_single_file(
-        base_model_url,
-        # safety_checker=None,
-        # load_safety_checker=True,
-        controlnet=controlnet,
-        scheduler=scheduler,
-        torch_dtype=torch.float16,
-    )
-
-    pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="EasyNegativeV2.safetensors", token="EasyNegativeV2",)
-    pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="badhandv4.pt", token="badhandv4")
-    pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_Ahegao.pt", token="HDA_Ahegao")
-    pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_Bondage.pt", token="HDA_Bondage")
-    pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_pet_play.pt", token="HDA_pet_play")
-    pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="fcNeg-neg.pt", token="fcNeg-neg")
-    pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_unconventional maid.pt", token="HDA_unconventional_maid")
-    pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_NakedHoodie.pt", token="HDA_NakedHoodie")
-    pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_NunDress.pt", token="HDA_NunDress")
-    pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_Shibari.pt", token="HDA_Shibari")
-    pipe.to("cuda")
-
-    print("---------------Loaded controlnet pipeline---------------")
-    pipe.unet.set_attn_processor(AttnProcessor2_0())
-    torch.cuda.empty_cache()
-    gc.collect()
-    print(f"CUDA memory allocated: {torch.cuda.max_memory_allocated(device='cuda') / 1e9:.2f} GB")
-    print("Model Compiled!")
+class Preprocessor:
+    MODEL_ID = "lllyasviel/Annotators"
+
+    def __init__(self):
+        self.model = None
+        self.name = ""
+
+    def load(self, name: str) -> None:
+        if name == self.name:
+            return
+        elif name == "NormalBae":
+            print("Loading NormalBae")
+            self.model = NormalBaeDetector.from_pretrained(self.MODEL_ID).to("cuda")
+            torch.cuda.empty_cache()
+            self.name = name
+        else:
+            raise ValueError
+        return
+
+    def __call__(self, image: Image.Image, **kwargs) -> Image.Image:
+        return self.model(image, **kwargs)
+
+# torch.cuda.max_memory_allocated(device="cuda")
+
+# Controlnet Normal
+model_id = "lllyasviel/control_v11p_sd15_normalbae"
+print("initializing controlnet")
+controlnet = ControlNetModel.from_pretrained(
+    model_id,
+    torch_dtype=torch.float16,
+    attn_implementation="flash_attention_2",
+).to("cuda")
+
+# Scheduler
+scheduler = DPMSolverMultistepScheduler.from_pretrained(
+    "runwayml/stable-diffusion-v1-5",
+    solver_order=2,
+    subfolder="scheduler",
+    use_karras_sigmas=True,
+    final_sigmas_type="sigma_min",
+    algorithm_type="sde-dpmsolver++",
+    prediction_type="epsilon",
+    thresholding=False,
+    denoise_final=True,
+    device_map="cuda",
+    torch_dtype=torch.float16,
+)
+
+# Stable Diffusion Pipeline URL
+base_model_url = "https://huggingface.co/broyang/hentaidigitalart_v20/blob/main/realcartoon3d_v15.safetensors"
+# base_model_url = "https://huggingface.co/Lykon/AbsoluteReality/blob/main/AbsoluteReality_1.8.1_pruned.safetensors"
+# vae_url = "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors"
+
+# print('loading vae')
+# vae = AutoencoderKL.from_single_file(vae_url, torch_dtype=torch.float16).to("cuda")
+# vae.to(memory_format=torch.channels_last)
+
+print('loading pipe')
+pipe = StableDiffusionControlNetPipeline.from_single_file(
+    base_model_url,
+    safety_checker=None,
+    controlnet=controlnet,
+    scheduler=scheduler,
+    # vae=vae,
+    torch_dtype=torch.float16,
+).to("cuda")
+
+print("loading preprocessor")
+preprocessor = Preprocessor()
+preprocessor.load("NormalBae")
+# pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="EasyNegativeV2.safetensors", token="EasyNegativeV2",)
+# pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="badhandv4.pt", token="badhandv4")
+# pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="fcNeg-neg.pt", token="fcNeg-neg")
+# pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_Ahegao.pt", token="HDA_Ahegao")
+# pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_Bondage.pt", token="HDA_Bondage")
+# pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_pet_play.pt", token="HDA_pet_play")
+# pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_unconventional maid.pt", token="HDA_unconventional_maid")
+# pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_NakedHoodie.pt", token="HDA_NakedHoodie")
+# pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_NunDress.pt", token="HDA_NunDress")
+# pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_Shibari.pt", token="HDA_Shibari")
+pipe.to("cuda")
+
+print("---------------Loaded controlnet pipeline---------------")
+torch.cuda.empty_cache()
+gc.collect()
+print(f"CUDA memory allocated: {torch.cuda.max_memory_allocated(device='cuda') / 1e9:.2f} GB")
+print("Model Compiled!")
 
+
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     if randomize_seed:
@@ -100,8 +136,9 @@ def get_additional_prompt():
 # outfit = ["schoolgirl outfit", "playboy outfit", "red dress", "gala dress", "cheerleader outfit", "nurse outfit", "Kimono"]
 
 def get_prompt(prompt, additional_prompt):
-    default = "hyperrealistic photography,extremely detailed,(intricate details),unity 8k wallpaper,ultra detailed"
-    default2 = f"professional 3d model {prompt},octane render,highly detailed,volumetric,dramatic lighting,hyperrealistic photography,extremely detailed,(intricate details),unity 8k wallpaper,ultra detailed"
+    default = "hyperrealistic photography,extremely detailed,(intricate details),unity 8k wallpaper,ultra detailed,tungsten white balance"
+    # default2 = f"professional 3d model {prompt},octane render,highly detailed,volumetric,dramatic lighting,hyperrealistic photography,extremely detailed,(intricate details),unity 8k wallpaper,ultra detailed"
+    default2 = f"hyperrealistic photography of {prompt},extremely detailed,(intricate details),unity 8k wallpaper,ultra detailed"
     randomize = get_additional_prompt()
     # nude = "NSFW,((nude)),medium bare breasts,hyperrealistic photography,extremely detailed,(intricate details),unity 8k wallpaper,ultra detailed"
     # bodypaint = "((fully naked with no clothes)),nude naked seethroughxray,invisiblebodypaint,rating_newd,NSFW"
@@ -141,7 +178,7 @@ footer {
     visibility: hidden;
 }
 .gradio-container {
-    max-width: 900px !important;
+    max-width: 1100px !important;
 }
 .gr-image {
     display: flex;
@@ -158,7 +195,7 @@ footer {
     object-position: center;
 }
 """
-with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
+with gr.Blocks("bethecloud/storj_theme", css=css) as demo:
     #############################################################################
     with gr.Row():
         with gr.Accordion("Advanced options", open=show_options, visible=show_options):
@@ -200,32 +237,32 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
         with gr.Column():
             prompt = gr.Textbox(
                 label="Description",
-                placeholder="Leave empty for a random output 👀",
+                placeholder="Enter a description (optional)",
             )
         # input image
-        with gr.Row():
-            with gr.Column():
+        with gr.Row(equal_height=True):
+            with gr.Column(scale=1, min_width=300):
                 image = gr.Image(
                     label="Input",
                     sources=["upload"],
                     show_label=True,
                     mirror_webcam=True,
-                    format="webp",
+                    type="pil",
                 )
                 # run button
                 with gr.Column():
-                    run_button = gr.Button(value="Use this one", size=["lg"], visible=False)
+                    run_button = gr.Button(value="Use this one", size="lg", visible=False)
                 # output image
-                with gr.Column():
+                with gr.Column(scale=1, min_width=300):
                     result = gr.Image(
-                        label="Anime AI",
+                        label="Output",
                         interactive=False,
-                        format="webp",
+                        type="pil",
                         show_share_button=False,
                     )
                     # Use this image button
                     with gr.Column():
-                        use_ai_button = gr.Button(value="Use this one", size=["lg"], visible=False)
+                        use_ai_button = gr.Button(value="Use this one", size="lg", visible=False)
     config = [
         image,
         prompt,
@@ -247,22 +284,15 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
     def auto_process_image(image, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
         return process_image(image, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed)
 
-    # AI Image Processing
-    @gr.on(triggers=[use_ai_button.click], inputs=config, outputs=result, show_progress="minimal")
-    def submit(image, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
-        return process_image(image, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed)
-
-    # Change input to result
-    @gr.on(triggers=[use_ai_button.click], inputs=None, outputs=image, show_progress="hidden")
-    def update_input():
-        try:
-            print("Updating image to AI Temp Image")
-            ai_temp_image = Image.open("temp_image.jpg")
-            return ai_temp_image
-        except FileNotFoundError:
-            print("No AI Image Available")
-            return None
-
+    @gr.on(triggers=[use_ai_button.click], inputs=[result] + config, outputs=[image, result], show_progress="minimal")
+    def submit(previous_result, image, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
+        # First, yield the previous result to update the input image immediately
+        yield previous_result, gr.update()
+        # Then, process the new input image
+        new_result = process_image(previous_result, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed)
+        # Finally, yield the new result
+        yield previous_result, new_result
+
     # Turn off buttons when processing
     @gr.on(triggers=[image.upload, use_ai_button.click, run_button.click], inputs=None, outputs=[run_button, use_ai_button], show_progress="hidden")
     def turn_buttons_off():
@@ -274,6 +304,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
         return gr.update(visible=True), gr.update(visible=True)
 
 
+# @spaces.GPU(duration=12)
 @torch.inference_mode()
 def process_image(
     image,
@@ -288,13 +319,12 @@ def process_image(
     seed,
     progress=gr.Progress(track_tqdm=True)
 ):
-    torch.cuda.synchronize()
+    # torch.cuda.synchronize()
     preprocess_start = time.time()
     print("processing image")
-    preprocessor.load("NormalBae")
-
     seed = random.randint(0, MAX_SEED)
     generator = torch.cuda.manual_seed(seed)
+    preprocessor.load("NormalBae")
     control_image = preprocessor(
         image=image,
         image_resolution=image_resolution,
@@ -305,7 +335,7 @@ def process_image(
     custom_prompt=str(get_prompt(prompt, a_prompt))
    negative_prompt=str(n_prompt)
     print(f"{custom_prompt}")
-
+    print(f"\n-------------------------Preprocess done in: {preprocess_time:.2f} seconds-------------------------")
     start = time.time()
     results = pipe(
         prompt=custom_prompt,
@@ -316,36 +346,8 @@ def process_image(
         generator=generator,
         image=control_image,
     ).images[0]
-    torch.cuda.synchronize()
-    torch.cuda.empty_cache()
-    print(f"\n-------------------------Preprocess done in: {preprocess_time:.2f} seconds-------------------------")
     print(f"\n-------------------------Inference done in: {time.time() - start:.2f} seconds-------------------------")
-
-    # if not os.path.exists("./outputs"):
-    #     os.makedirs("./outputs")
-    # img_path = f"./outputs/{timestamp}.jpg"
-    # results_path = f"./outputs/{timestamp}_out_{prompt}.jpg"
-    # imageio.imsave(img_path, image)
-    # results.save(results_path)
-    results.save("temp_image.jpg")
-
-    # api.upload_file(
-    #     path_or_fileobj=img_path,
-    #     path_in_repo=img_path,
-    #     repo_id="broyang/anime-ai-outputs",
-    #     repo_type="dataset",
-    #     token=API_KEY,
-    #     run_as_future=True,
-    # )
-    # api.upload_file(
-    #     path_or_fileobj=results_path,
-    #     path_in_repo=results_path,
-    #     repo_id="broyang/anime-ai-outputs",
-    #     repo_type="dataset",
-    #     token=API_KEY,
-    #     run_as_future=True,
-    # )
-
+    torch.cuda.empty_cache()
     return results
 
 if prod:
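The inlined Preprocessor class above replaces the old `from preprocess import Preprocessor` import: it loads the NormalBae annotator lazily, keeps it on the GPU, and skips reloading when the requested name matches the cached one. A usage sketch, assuming controlnet_aux_local mirrors the controlnet_aux package's NormalBaeDetector interface (the input file name is a placeholder):

from PIL import Image

preprocessor = Preprocessor()       # nothing on the GPU yet
preprocessor.load("NormalBae")      # first load pulls lllyasviel/Annotators weights
control_image = preprocessor(
    image=Image.open("input.png"),  # placeholder input image
    image_resolution=512,
    detect_resolution=512,
)
preprocessor.load("NormalBae")      # no-op: name is cached, model is reused

Calling load once at module level, as the new code does before the UI starts, moves the download and load cost to startup instead of the first request.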
 
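The rewritten submit handler (new lines 287-294) is a generator: Gradio streams each yield to the event's outputs, so the first yield swaps the previous result into the input slot right away (gr.update() leaves the output component untouched), and the second yield delivers the freshly generated image. A stripped-down sketch of the pattern, assuming Gradio 4.x generator-event semantics (the components and stub function are illustrative, not the app's full config):

import gradio as gr

def slow_generate(img):
    return img  # stand-in for the app's process_image

with gr.Blocks() as demo:
    image = gr.Image(label="Input", type="pil")
    result = gr.Image(label="Output", interactive=False)
    use_ai_button = gr.Button("Use this one")

    @gr.on(triggers=[use_ai_button.click], inputs=[result], outputs=[image, result])
    def submit(previous_result):
        # 1) Repaint the input with the last output immediately.
        yield previous_result, gr.update()
        # 2) Then run the slow generation and fill the output slot.
        yield previous_result, slow_generate(previous_result)

demo.launch()

This is also why the diff drops the separate update_input callback and the temp_image.jpg round-trip: the previous result is passed directly into the event as an input.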