import os
import gradio as gr
import json
from gradio_client import Client, handle_file
from gradio_imageslider import ImageSlider

with open('loras.json', 'r') as f:
    loras = json.load(f)
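
# A sketch of the entry structure this code expects in loras.json, inferred from
# the fields accessed below ("image", "title", "repo", "trigger_word"); the real
# file may carry additional keys:
# [
#   {
#     "image": "images/example.png",
#     "title": "Example LoRA",
#     "repo": "username/example-sdxl-lora",
#     "trigger_word": "examplestyle"
#   }
# ]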

job = None

# Verify the model Space URLs
custom_model_url = "https://fffiloni-sd-xl-custom-model.hf.space"
tile_upscaler_url = "https://gokaygokay-tileupscalerv2.hf.space"

try:
    client_custom_model = Client(custom_model_url)
    print(f"Loaded custom model from {custom_model_url}")
except ValueError as e:
    print(f"Failed to load custom model: {e}")

try:
    client_tile_upscaler = Client(tile_upscaler_url)
    print(f"Loaded tile upscaler from {tile_upscaler_url}")
except ValueError as e:
    print(f"Failed to load tile upscaler: {e}")


def infer(selected_index, prompt, style_prompt, inf_steps, guidance_scale, width, height, seed, lora_weight, progress=gr.Progress(track_tqdm=True)):
    try:
        global job
        if selected_index is None:
            raise gr.Error("You must select a LoRA before proceeding.")
        selected_lora = loras[selected_index]
        custom_model = selected_lora["repo"]
        trigger_word = selected_lora["trigger_word"]

        # Ask the remote Space to load the selected LoRA and read back the weight file name
        result = client_custom_model.submit(
            custom_model=custom_model,
            api_name="/load_model"
        )
        weight_name = result.result()[2]['value']

        # Compose the final prompt: trigger word, user prompt, then style prompt
        prompt_arr = [trigger_word, prompt, style_prompt]
        prompt = '. '.join([element.strip() for element in prompt_arr if element.strip() != ''])

        # Run inference asynchronously so it can be cancelled from the UI
        job = client_custom_model.submit(
            custom_model=custom_model,
            weight_name=weight_name,
            prompt=prompt,
            inf_steps=inf_steps,
            guidance_scale=guidance_scale,
            width=width,
            height=height,
            seed=seed,
            lora_weight=lora_weight,
            api_name="/infer"
        )
        result = job.result()

        # Append the composed prompt so it can be shown alongside the image
        new_result = result + (prompt,)
        return new_result
    except Exception as e:
        gr.Warning("Error: " + str(e))


def cancel_infer():
    global job
    if job:
        job.cancel()
        return "Job has been cancelled"
    return "No job to cancel"


def update_selection(evt: gr.SelectData):
    selected_lora = loras[evt.index]
    new_placeholder = f"Type a prompt for {selected_lora['title']}"
    lora_repo = selected_lora["repo"]
    updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✨"
    return (
        gr.update(placeholder=new_placeholder),
        updated_text,
        evt.index
    )


def upscale_image(image, resolution, num_inference_steps, strength, hdr, guidance_scale, controlnet_strength, scheduler_name):
    result = client_tile_upscaler.predict(
        param_0=handle_file(image),
        param_1=resolution,
        param_2=num_inference_steps,
        param_3=strength,
        param_4=hdr,
        param_5=guidance_scale,
        param_6=controlnet_strength,
        param_7=scheduler_name,
        api_name="/wrapper"
    )
    return result
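
# Example call (values are illustrative): the result feeds the ImageSlider below,
# so the remote "/wrapper" endpoint is expected to return a before/after image pair.
# The scheduler name is an assumption and must match one the remote Space accepts.
# pair = upscale_image("output.png", 1024, 20, 0.4, 0.3, 7.5, 0.75, "DDIM")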
css=""" | |
""" | |

with gr.Blocks(css=css) as demo:
    gr.Markdown("# lichorosario LoRA Portfolio")
    gr.Markdown(
        "### This is my portfolio.\n"
        "**Note**: Generation quality may vary. For best results, adjust the parameters.\n"
        "Special thanks to [@artificialguybr](https://huggingface.co/artificialguybr) and [@fffiloni](https://huggingface.co/fffiloni)."
    )

    with gr.Row():
        with gr.Column(scale=2):
            prompt_in = gr.Textbox(
                label="Your Prompt",
                info="Don't forget to include your trigger word if necessary"
            )
            style_prompt_in = gr.Textbox(
                label="Your Style Prompt"
            )
            selected_info = gr.Markdown("")
            used_prompt = gr.Textbox(
                label="Used prompt"
            )
            with gr.Column(elem_id="col-container"):
                with gr.Accordion("Advanced Settings", open=False):
                    with gr.Row():
                        inf_steps = gr.Slider(
                            label="Inference steps",
                            minimum=12,
                            maximum=100,
                            step=1,
                            value=25
                        )
                        guidance_scale = gr.Slider(
                            label="Guidance scale",
                            minimum=0.0,
                            maximum=50.0,
                            step=0.1,
                            value=12
                        )
                    with gr.Row():
                        width = gr.Slider(
                            label="Width",
                            minimum=256,
                            maximum=3072,
                            step=32,
                            value=2048,
                        )
                        height = gr.Slider(
                            label="Height",
                            minimum=256,
                            maximum=3072,
                            step=32,
                            value=1024,
                        )
                    examples = [
                        [1024, 512],
                        [2048, 512],
                        [3072, 512]
                    ]
                    gr.Examples(
                        label="Presets",
                        examples=examples,
                        inputs=[width, height],
                        outputs=[]
                    )
                    with gr.Row():
                        seed = gr.Slider(
                            label="Seed",
                            info="-1 denotes a random seed",
                            minimum=-1,
                            maximum=423538377342,
                            step=1,
                            value=-1
                        )
                        last_used_seed = gr.Number(
                            label="Last used seed",
                            info="the seed used in the last generation",
                        )
                    lora_weight = gr.Slider(
                        label="LoRA weight",
                        minimum=0.0,
                        maximum=1.0,
                        step=0.01,
                        value=1.0
                    )

        with gr.Column(scale=1):
            gallery = gr.Gallery(
                [(item["image"], item["title"]) for item in loras],
                label="LoRA Gallery",
                allow_preview=False,
                columns=2,
                height="100%"
            )
            submit_btn = gr.Button("Submit")
            cancel_btn = gr.Button("Cancel")

    with gr.Row():
        image_out = gr.Image(label="Image output")
        image_upscaled = ImageSlider(label="Before / After", type="numpy", show_download_button=False)
    scale_btn = gr.Button("Upscale")

    # Tracks which LoRA is currently selected in the gallery
    selected_index = gr.State(None)

    submit_btn.click(
        fn=infer,
        inputs=[selected_index, prompt_in, style_prompt_in, inf_steps, guidance_scale, width, height, seed, lora_weight],
        outputs=[image_out, last_used_seed, used_prompt]
    )

    cancel_btn.click(
        fn=cancel_infer,
        outputs=[]
    )

    def upscale_with_fixed_values(image):
        # Last two args (ControlNet strength, scheduler name) are assumed defaults;
        # the original call omitted them, which raises a TypeError in upscale_image.
        return upscale_image(image, 768, 25, 0.4, 0.3, 7.5, 0.75, "DDIM")

    scale_btn.click(
        fn=upscale_with_fixed_values,
        inputs=[image_out],
        outputs=[image_upscaled]
    )

    gallery.select(update_selection, outputs=[prompt_in, selected_info, selected_index])

demo.launch()
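
# On Hugging Face Spaces the bare launch() above is enough; when running locally,
# launch() accepts extra options, e.g. demo.launch(share=True) for a temporary
# public URL.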