import os
import random

import gradio as gr
import numpy as np
from huggingface_hub import AsyncInferenceClient
from translatepy import Translator
from PIL import Image
from gradio_client import Client, handle_file
from gradio_imageslider import ImageSlider
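
# MAX_SEED caps the random seed range; HF_TOKEN and HF_TOKEN_UPSCALER are expected to be
# provided as environment variables (e.g., Space secrets) for the inference client and
# the Finegrain upscaler Space, respectively.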
MAX_SEED = np.iinfo(np.int32).max
HF_TOKEN = os.environ.get("HF_TOKEN")
HF_TOKEN_UPSCALER = os.environ.get("HF_TOKEN_UPSCALER")

def enable_lora(lora_add, basemodel):
    # Use the LoRA repository when one is provided, otherwise fall back to the base model.
    return lora_add if lora_add else basemodel

async def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
    try:
        if seed == -1:
            seed = random.randint(0, MAX_SEED)
        seed = int(seed)
        # Translate the prompt to English and append the LoRA trigger word.
        text = str(Translator().translate(prompt, 'English')) + "," + lora_word
        client = AsyncInferenceClient(token=HF_TOKEN)
        image = await client.text_to_image(prompt=text, height=height, width=width, guidance_scale=scales, num_inference_steps=steps, model=model)
        return image, seed
    except Exception as e:
        print(f"Error generating image: {e}")
        return None, None

def get_upscale_finegrain(prompt, img_path, upscale_factor):
    try:
        client = Client("finegrain/finegrain-image-enhancer", hf_token=HF_TOKEN_UPSCALER)
        result = client.predict(input_image=handle_file(img_path), prompt=prompt, negative_prompt="", seed=42, upscale_factor=upscale_factor, controlnet_scale=0.6, controlnet_decay=1, condition_scale=6, tile_width=112, tile_height=144, denoise_strength=0.35, num_inference_steps=18, solver="DDIM", api_name="/process")
        # The Space returns several outputs; the second element is the upscaled image path.
        return result[1]
    except Exception as e:
        print(f"Error upscaling image: {e}")
        return None

async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
    model = enable_lora(lora_model, basemodel) if process_lora else basemodel
    image, seed = await generate_image(prompt, model, "", width, height, scales, steps, seed)
    if image is None:
        return [None, None]
    image_path = "temp_image.jpg"
    image.save(image_path, format="JPEG")
    if process_upscale:
        upscale_image_path = get_upscale_finegrain(prompt, image_path, upscale_factor)
        if upscale_image_path is None:
            # Upscaling failed; show the original image on both sides of the slider.
            return [image_path, image_path]
        upscale_image = Image.open(upscale_image_path)
        upscale_image.save("upscale_image.jpg", format="JPEG")
        return [image_path, "upscale_image.jpg"]
    else:
        return [image_path, image_path]

css = """
#col-container{ margin: 0 auto; max-width: 1024px;}
"""
with gr.Blocks(css=css, theme="Nymbo/Nymbo_Theme") as demo:
    with gr.Column(elem_id="col-container"):
        with gr.Row():
            with gr.Column(scale=3):
                output_res = ImageSlider(label="Flux / Upscaled")
            with gr.Column(scale=2):
                prompt = gr.Textbox(label="Image description")
                basemodel_choice = gr.Dropdown(label="Model", choices=["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-dev"], value="black-forest-labs/FLUX.1-schnell")
                lora_model_choice = gr.Dropdown(label="Realism LoRA", choices=["Shakker-Labs/FLUX.1-dev-LoRA-add-details", "XLabs-AI/flux-RealismLora"], value="XLabs-AI/flux-RealismLora")
                process_lora = gr.Checkbox(label="Apply LoRA")
                process_upscale = gr.Checkbox(label="Apply upscaler")
                upscale_factor = gr.Radio(label="Upscale factor", choices=[2, 4, 8], value=2)
                with gr.Accordion(label="Advanced options", open=False):
                    width = gr.Slider(label="Width", minimum=512, maximum=1280, step=8, value=1280)
                    height = gr.Slider(label="Height", minimum=512, maximum=1280, step=8, value=768)
                    scales = gr.Slider(label="Guidance scale", minimum=1, maximum=20, step=1, value=10)
                    steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=20)
                    seed = gr.Number(label="Seed", value=-1)
        btn = gr.Button("Generate")
        btn.click(fn=gen, inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora], outputs=output_res)

demo.launch()