salomonsky committed
Commit 481dde5
1 Parent(s): 32fdddd

Update app.py

Files changed (1)
1. app.py  +30 -3
app.py CHANGED
@@ -1,4 +1,5 @@
 import os
+import torch
 import gradio as gr
 import numpy as np
 import random
@@ -16,6 +17,20 @@ MAX_SEED = np.iinfo(np.int32).max
 HF_TOKEN = os.environ.get("HF_TOKEN")
 HF_TOKEN_UPSCALER = os.environ.get("HF_TOKEN_UPSCALER")
 
+if not os.path.exists('GFPGANv1.4.pth'):
+    os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth -P .")
+
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+model_path = 'GFPGANv1.4.pth'
+gfpgan = GFPGANer(
+    model_path=model_path,
+    upscale_factor=4,
+    arch='clean',
+    channel_multiplier=2,
+    model_name='GPFGAN',
+    device=device
+)
+
 def enable_lora(lora_add, basemodel):
     return basemodel if not lora_add else lora_add
 
@@ -41,7 +56,15 @@ def get_upscale_finegrain(prompt, img_path, upscale_factor):
         print(f"Error upscale image: {e}")
         return None
 
-async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
+def get_upscale_gfpgan(prompt, img_path):
+    try:
+        img = gfpgan.enhance(img_path)
+        return img
+    except Exception as e:
+        print(f"Error upscale image: {e}")
+        return None
+
+async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora, upscale_model):
     model = enable_lora(lora_model, basemodel) if process_lora else basemodel
     image, seed = await generate_image(prompt, model, "", width, height, scales, steps, seed)
     if image is None:
@@ -51,7 +74,10 @@ async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_fac
     image.save(image_path, format="JPEG")
 
     if process_upscale:
-        upscale_image = get_upscale_finegrain(prompt, image_path, upscale_factor)
+        if upscale_model == "FineGrain":
+            upscale_image = get_upscale_finegrain(prompt, image_path, upscale_factor)
+        elif upscale_model == "GPFGAN":
+            upscale_image = get_upscale_gfpgan(prompt, image_path)
         upscale_image_path = "upscale_image.jpg"
         upscale_image.save(upscale_image_path, format="JPEG")
         return [image_path, upscale_image_path]
@@ -74,6 +100,7 @@ with gr.Blocks(css=css, theme="Nymbo/Nymbo_Theme") as demo:
         process_lora = gr.Checkbox(label="Procesar LORA")
         process_upscale = gr.Checkbox(label="Procesar Escalador")
         upscale_factor = gr.Radio(label="Factor de Escala", choices=[2, 4, 8], value=2)
+        upscale_model = gr.Radio(label="Modelo de Escalado", choices=["FineGrain", "GPFGAN"], value="FineGrain")
 
         with gr.Accordion(label="Opciones Avanzadas", open=False):
             width = gr.Slider(label="Ancho", minimum=512, maximum=1280, step=8, value=1280)
@@ -90,7 +117,7 @@ with gr.Blocks(css=css, theme="Nymbo/Nymbo_Theme") as demo:
         queue=False
     ).then(
         fn=gen,
-        inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora],
+        inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora, upscale_model],
         outputs=[output_res]
     )
 
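
Review note: as committed, the GFPGAN additions are unlikely to run. `GFPGANer` is used without an import, the constructor keywords `upscale_factor` and `model_name` do not match the gfpgan package's `GFPGANer` (its parameter is `upscale`, and there is no model-name argument), and `GFPGANer.enhance()` takes a decoded BGR image array and returns a `(cropped_faces, restored_faces, restored_img)` tuple rather than a PIL image, so the later `upscale_image.save(...)` would fail. Below is a minimal sketch of what that path could look like; it keeps the commit's checkpoint path, download URL, and helper name, but everything else is an assumption, not the author's code.

# Sketch only -- assumes `pip install gfpgan` (which pulls in basicsr/facexlib).
# The `prompt` argument is unused and kept only to mirror the commit's signature.
import os
import cv2
import torch
from PIL import Image
from gfpgan import GFPGANer  # this import is missing from the committed diff

if not os.path.exists("GFPGANv1.4.pth"):
    os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth -P .")

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
gfpgan = GFPGANer(
    model_path="GFPGANv1.4.pth",
    upscale=4,                # GFPGANer expects `upscale`, not `upscale_factor`
    arch="clean",
    channel_multiplier=2,
    device=device,            # there is no `model_name` keyword
)

def get_upscale_gfpgan(prompt, img_path):
    try:
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)            # enhance() wants a BGR array, not a path
        _, _, restored = gfpgan.enhance(img, paste_back=True)   # returns (cropped_faces, restored_faces, restored_img)
        return Image.fromarray(cv2.cvtColor(restored, cv2.COLOR_BGR2RGB))  # PIL image, so .save() works downstream
    except Exception as e:
        print(f"Error upscale image: {e}")
        return None

If the fixed 4x factor matters, the existing `upscale_factor` Radio could feed the `upscale` argument instead of it being hard-coded.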
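
A smaller point on the dispatch inside `gen`: it keys off the literal Radio strings, so the "GPFGAN" spelling has to stay identical in `choices=[...]` and in the `elif`, and both upscale helpers return `None` on error, in which case `upscale_image.save(...)` raises. A hedged, more defensive variant of that branch, factored into a helper so it reads on its own; the fall-back behavior (return only the base image) is an assumption, since the diff does not show the rest of the function.

# Sketch of a defensive upscale branch; names mirror the commit, behavior on failure is assumed.
def apply_upscale(prompt, image_path, upscale_model, upscale_factor, process_upscale):
    if not process_upscale:
        return [image_path]
    if upscale_model == "FineGrain":
        upscale_image = get_upscale_finegrain(prompt, image_path, upscale_factor)
    elif upscale_model == "GPFGAN":                    # must match the Radio choice string exactly
        upscale_image = get_upscale_gfpgan(prompt, image_path)
    else:
        upscale_image = None
    if upscale_image is None:                          # both helpers return None on error
        return [image_path]
    upscale_image_path = "upscale_image.jpg"
    upscale_image.save(upscale_image_path, format="JPEG")
    return [image_path, upscale_image_path]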