import os
import gradio as gr
import numpy as np
import random
from huggingface_hub import AsyncInferenceClient, InferenceClient
from PIL import Image
from gradio_client import Client, handle_file
from gradio_imageslider import ImageSlider
MAX_SEED = np.iinfo(np.int32).max
HF_TOKEN = os.environ.get("HF_TOKEN")
HF_TOKEN_UPSCALER = os.environ.get("HF_TOKEN_UPSCALER")
# Pass the tokens read above so the inference calls are authenticated
client = AsyncInferenceClient(token=HF_TOKEN)
llm_client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1", token=HF_TOKEN)
def enable_lora(lora_add, basemodel):
    return basemodel if not lora_add else lora_add

async def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
    try:
        if seed == -1:
            seed = random.randint(0, MAX_SEED)
        seed = int(seed)
        text = prompt + "," + lora_word
        image = await client.text_to_image(prompt=text, height=height, width=width, guidance_scale=scales, num_inference_steps=steps, model=model)
        return image, seed
    except Exception as e:
        return f"Error generating image: {e}", None
def get_upscale_finegrain(prompt, img_path, upscale_factor):
    try:
        client = Client("finegrain/finegrain-image-enhancer", hf_token=HF_TOKEN_UPSCALER)
        result = client.predict(
            input_image=handle_file(img_path),
            prompt=prompt,
            negative_prompt="",
            seed=42,
            upscale_factor=upscale_factor,
            controlnet_scale=0.6,
            controlnet_decay=1,
            condition_scale=6,
            tile_width=112,
            tile_height=144,
            denoise_strength=0.35,
            num_inference_steps=18,
            solver="DDIM",
            api_name="/process",
        )
        return result[1]
    except Exception:
        return None
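# The parameter names and api_name above target the public finegrain/finegrain-image-enhancer
# Space; result[1] is assumed to hold the path of the upscaled image. Usage sketch:
#   upscaled_path = get_upscale_finegrain("a red fox in snow", "temp_image.jpg", 2)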
async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
    try:
        model = enable_lora(lora_model, basemodel) if process_lora else basemodel
        improved_prompt = await improve_prompt(prompt)
        combined_prompt = f"{prompt} {improved_prompt}"
        image, seed = await generate_image(combined_prompt, model, "", width, height, scales, steps, seed)
        if isinstance(image, str) and image.startswith("Error"):
            return [image, None]
        image_path = "temp_image.jpg"
        image.save(image_path, format="JPEG")
        if process_upscale:
            upscale_image_path = get_upscale_finegrain(combined_prompt, image_path, upscale_factor)
            if upscale_image_path is not None:
                upscale_image = Image.open(upscale_image_path)
                upscale_image.save("upscale_image.jpg", format="JPEG")
                return [image_path, "upscale_image.jpg"]
        # No upscaling requested, or upscaling failed: show the base image on both slider sides
        return [image_path, image_path]
    except Exception as e:
        return [f"Error: {e}", None]
async def improve_prompt(prompt):
    try:
        instruction = "improve this idea and describe in English a detailed img2vid prompt in a single paragraph of up to 200 characters, developing atmosphere, characters, lighting, and cameras."
        formatted_prompt = f"{prompt}: {instruction}"
        # text_generation returns the generated string directly unless details=True is passed
        response = llm_client.text_generation(formatted_prompt, max_new_tokens=200)
        return response.strip()
    except Exception as e:
        return f"Error improving prompt: {e}"
css = """
#col-container{ margin: 0 auto; max-width: 1024px;}
"""
with gr.Blocks(css=css, theme="Nymbo/Nymbo_Theme") as demo:
    with gr.Column(elem_id="col-container"):
        with gr.Row():
            with gr.Column(scale=3):
                output_res = ImageSlider(label="Flux / Upscaled")
            with gr.Column(scale=2):
                prompt = gr.Textbox(label="Image description")
                basemodel_choice = gr.Dropdown(label="Model", choices=["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-DEV"], value="black-forest-labs/FLUX.1-schnell")
                lora_model_choice = gr.Dropdown(label="Realism LoRA", choices=["Shakker-Labs/FLUX.1-dev-LoRA-add-details", "XLabs-AI/flux-RealismLora"], value="XLabs-AI/flux-RealismLora")
                with gr.Row():
                    process_lora = gr.Checkbox(label="Apply LoRA")
                    process_upscale = gr.Checkbox(label="Apply upscaler")
                improved_prompt = gr.Textbox(label="Improved prompt", interactive=False)
                improve_btn = gr.Button("Improve my prompt")

                async def improve_prompt_wrapper(prompt):
                    # improve_prompt is a coroutine and must be awaited; Gradio accepts async handlers
                    improved_text = await improve_prompt(prompt)
                    return prompt, improved_text

                improve_btn.click(fn=improve_prompt_wrapper, inputs=[prompt], outputs=[prompt, improved_prompt])
                reset_btn = gr.Button("Reset")
                # Handlers return new component values (the .update() pattern was removed in Gradio 4)
                reset_btn.click(fn=lambda: ("", ""), inputs=None, outputs=[prompt, improved_prompt])
        with gr.Accordion(label="Advanced options", open=False):
            width = gr.Slider(label="Width", minimum=512, maximum=1280, step=8, value=1280)
            height = gr.Slider(label="Height", minimum=512, maximum=1280, step=8, value=768)
            upscale_factor = gr.Radio(label="Upscale factor", choices=[2, 4, 8], value=2)
            scales = gr.Slider(label="Guidance scale", minimum=1, maximum=20, step=1, value=10)
            steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=20)
            seed = gr.Number(label="Seed", value=-1)
            reset_advanced = gr.Button("Reset")
            reset_advanced.click(fn=lambda: (1280, 768, 10, 20, -1), inputs=None, outputs=[width, height, scales, steps, seed])
        # gr.StatusTracker is not part of the Gradio API; click events display progress on their own
        btn = gr.Button("Generate", variant="primary")
        btn.click(
            fn=gen,
            inputs=[
                prompt,
                basemodel_choice,
                width,
                height,
                scales,
                steps,
                seed,
                upscale_factor,
                process_upscale,
                lora_model_choice,
                process_lora,
            ],
            outputs=[output_res],
        )
        # Remember the last submitted prompt in per-session state; registering a second
        # .click listener on the same button is valid and runs alongside the main event
        # (the original code toggled a nonexistent status tracker when the prompt changed).
        previous_prompt = gr.State("")

        def remember_prompt(prompt, previous_prompt):
            # Store the current prompt so a later event can tell whether it changed
            return prompt

        btn.click(
            fn=remember_prompt,
            inputs=[prompt, previous_prompt],
            outputs=[previous_prompt],
        )
demo.launch()
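# When running locally, demo.launch(share=True) would additionally expose a temporary public URL.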