Update app.py
app.py
CHANGED
@@ -14,12 +14,11 @@ from gradio_imageslider import ImageSlider
 
 translator = Translator()
 HF_TOKEN = os.environ.get("HF_TOKEN")
-basemodel = "black-forest-labs/FLUX.1-schnell"
 MAX_SEED = np.iinfo(np.int32).max
 CSS = "footer { visibility: hidden; }"
 JS = "function () { gradioURL = window.location.href; if (!gradioURL.endsWith('?__theme=dark')) { window.location.replace(gradioURL + '?__theme=dark'); } }"
 
-def enable_lora(lora_add):
+def enable_lora(lora_add, basemodel):
     return basemodel if not lora_add else lora_add
 
 async def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
@@ -34,8 +33,8 @@ async def generate_image(prompt, model, lora_word, width, height, scales, steps,
     except Exception as e:
         raise gr.Error(f"Error en {e}")
 
-async def gen(prompt, lora_add, lora_word, width, height, scales, steps, seed, upscale_factor, process_upscale):
-    model = enable_lora(lora_add)
+async def gen(prompt, basemodel, lora_add, lora_word, width, height, scales, steps, seed, upscale_factor, process_upscale):
+    model = enable_lora(lora_add, basemodel)
     image, seed = await generate_image(prompt, model, lora_word, width, height, scales, steps, seed)
     image_path = "temp_image.png"
     image.save(image_path)
@@ -68,6 +67,7 @@ with gr.Blocks(css=CSS, js=JS, theme="Nymbo/Nymbo_Theme") as demo:
     with gr.Column(scale=0.8):
         with gr.Group():
            prompt = gr.Textbox(label="Prompt")
+            basemodel_choice = gr.Radio(label="Base Model", choices=["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-DEV"], value="black-forest-labs/FLUX.1-schnell")
            lora_add = gr.Textbox(label="Add Flux LoRA", info="Modelo Lora", lines=1, value="XLabs-AI/flux-RealismLora")
            lora_word = gr.Textbox(label="Add Flux LoRA Trigger Word", info="Add the Trigger Word", lines=1, value="")
            width = gr.Slider(label="Width", minimum=512, maximum=1280, step=8, value=512)
@@ -86,7 +86,7 @@ with gr.Blocks(css=CSS, js=JS, theme="Nymbo/Nymbo_Theme") as demo:
        queue=False
    ).then(
        fn=gen,
-       inputs=[prompt, lora_add, lora_word, width, height, scales, steps, seed, upscale_factor, process_upscale],
+       inputs=[prompt, basemodel_choice, lora_add, lora_word, width, height, scales, steps, seed, upscale_factor, process_upscale],
        outputs=[output_res]
    )
 
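The net effect of this change is that the hard-coded basemodel constant is replaced by a per-request selection: the new basemodel_choice radio is passed into gen, which forwards it to enable_lora to decide whether to load the LoRA repo or the selected base model. The sketch below shows that wiring in isolation as a minimal, hypothetical stand-in: only enable_lora, gen, and basemodel_choice mirror the diff, while the real generate_image call and the upscaling path are stubbed out.

# Minimal sketch of the new basemodel wiring, with image generation stubbed out.
# enable_lora(), gen(), and basemodel_choice follow the diff; the Button and the
# text output are illustrative only.
import gradio as gr

def enable_lora(lora_add, basemodel):
    # Use the LoRA repo if one was entered, otherwise fall back to the selected base model.
    return basemodel if not lora_add else lora_add

def gen(prompt, basemodel, lora_add):
    model = enable_lora(lora_add, basemodel)
    # The Space awaits generate_image(...) and an optional upscale here;
    # this stub just reports which model would be used.
    return f"Would run '{prompt}' on {model}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    basemodel_choice = gr.Radio(
        label="Base Model",
        choices=["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-DEV"],
        value="black-forest-labs/FLUX.1-schnell",
    )
    lora_add = gr.Textbox(label="Add Flux LoRA", value="XLabs-AI/flux-RealismLora")
    result = gr.Textbox(label="Result")
    gr.Button("Generate").click(
        fn=gen,
        inputs=[prompt, basemodel_choice, lora_add],
        outputs=[result],
    )

if __name__ == "__main__":
    demo.launch()

With this wiring, clearing the LoRA textbox makes enable_lora fall back to whichever base model the radio selects, matching the new "return basemodel if not lora_add else lora_add" logic.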