import spaces
import os
from stablepy import (
Model_Diffusers,
SCHEDULE_TYPE_OPTIONS,
SCHEDULE_PREDICTION_TYPE_OPTIONS,
check_scheduler_compatibility,
)
from constants import (
PREPROCESSOR_CONTROLNET,
TASK_STABLEPY,
TASK_MODEL_LIST,
UPSCALER_DICT_GUI,
UPSCALER_KEYS,
PROMPT_W_OPTIONS,
WARNING_MSG_VAE,
SDXL_TASK,
MODEL_TYPE_TASK,
POST_PROCESSING_SAMPLER,
)
from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
import torch
import re
from stablepy import (
scheduler_names,
IP_ADAPTERS_SD,
IP_ADAPTERS_SDXL,
)
import time
from PIL import ImageFile
from utils import (
get_model_list,
extract_parameters,
get_model_type,
extract_exif_data,
create_mask_now,
download_diffuser_repo,
get_used_storage_gb,
delete_model,
progress_step_bar,
html_template_message,
escape_html,
)
from datetime import datetime
import gradio as gr
import logging
import diffusers
import warnings
from stablepy import logger
# import urllib.parse
ImageFile.LOAD_TRUNCATED_IMAGES = True
# os.environ["PYTORCH_NO_CUDA_MEMORY_CACHING"] = "1"
print(os.getenv("SPACES_ZERO_GPU"))
## BEGIN MOD
logging.getLogger("diffusers").setLevel(logging.ERROR)
diffusers.utils.logging.set_verbosity(40)  # 40 == logging.ERROR
warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
logger.setLevel(logging.DEBUG)
from env import (
HF_TOKEN, HF_READ_TOKEN, # to use only for private repos
CIVITAI_API_KEY, HF_LORA_PRIVATE_REPOS1, HF_LORA_PRIVATE_REPOS2,
HF_LORA_ESSENTIAL_PRIVATE_REPO, HF_VAE_PRIVATE_REPO,
HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO, HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO,
DIRECTORY_MODELS, DIRECTORY_LORAS, DIRECTORY_VAES, DIRECTORY_EMBEDS,
DIRECTORY_EMBEDS_SDXL, DIRECTORY_EMBEDS_POSITIVE_SDXL,
LOAD_DIFFUSERS_FORMAT_MODEL, DOWNLOAD_MODEL_LIST, DOWNLOAD_LORA_LIST,
DOWNLOAD_VAE_LIST, DOWNLOAD_EMBEDS)
from modutils import (to_list, list_uniq, list_sub, get_model_id_list, get_tupled_embed_list,
get_tupled_model_list, get_lora_model_list, download_private_repo, download_things)
# - **Download Models**
download_model = ", ".join(DOWNLOAD_MODEL_LIST)
# - **Download VAEs**
download_vae = ", ".join(DOWNLOAD_VAE_LIST)
# - **Download LoRAs**
download_lora = ", ".join(DOWNLOAD_LORA_LIST)
#download_private_repo(HF_LORA_ESSENTIAL_PRIVATE_REPO, DIRECTORY_LORAS, True)
download_private_repo(HF_VAE_PRIVATE_REPO, DIRECTORY_VAES, False)
load_diffusers_format_model = list_uniq(LOAD_DIFFUSERS_FORMAT_MODEL + get_model_id_list())
## END MOD
# Download models, VAEs, LoRAs, and embeddings listed in the environment config
for url in [url.strip() for url in download_model.split(',')]:
    if not os.path.exists(f"{DIRECTORY_MODELS}/{url.split('/')[-1]}"):
        download_things(DIRECTORY_MODELS, url, HF_TOKEN, CIVITAI_API_KEY)
for url in [url.strip() for url in download_vae.split(',')]:
    if not os.path.exists(f"{DIRECTORY_VAES}/{url.split('/')[-1]}"):
        download_things(DIRECTORY_VAES, url, HF_TOKEN, CIVITAI_API_KEY)
for url in [url.strip() for url in download_lora.split(',')]:
    if not os.path.exists(f"{DIRECTORY_LORAS}/{url.split('/')[-1]}"):
        download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY)
# Download embeddings
for url_embed in DOWNLOAD_EMBEDS:
    if not os.path.exists(f"{DIRECTORY_EMBEDS}/{url_embed.split('/')[-1]}"):
        download_things(DIRECTORY_EMBEDS, url_embed, HF_TOKEN, CIVITAI_API_KEY)
# Build list models
embed_list = get_model_list(DIRECTORY_EMBEDS)
single_file_model_list = get_model_list(DIRECTORY_MODELS)
model_list = list_uniq(get_model_id_list() + LOAD_DIFFUSERS_FORMAT_MODEL + single_file_model_list)
## BEGIN MOD
lora_model_list = get_lora_model_list()
vae_model_list = get_model_list(DIRECTORY_VAES)
vae_model_list.insert(0, "BakedVAE")
vae_model_list.insert(0, "None")
#download_private_repo(HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO, DIRECTORY_EMBEDS_SDXL, False)
#download_private_repo(HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO, DIRECTORY_EMBEDS_POSITIVE_SDXL, False)
embed_sdxl_list = get_model_list(DIRECTORY_EMBEDS_SDXL) + get_model_list(DIRECTORY_EMBEDS_POSITIVE_SDXL)
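# Pick the embedding set that matches the loaded pipeline class:
# SDXL pipelines get the SDXL embeddings, everything else the SD 1.5 list.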
def get_embed_list(pipeline_name):
return get_tupled_embed_list(embed_sdxl_list if pipeline_name == "StableDiffusionXLPipeline" else embed_list)
## END MOD
print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
## BEGIN MOD
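# GuiSD wraps a single stablepy Model_Diffusers instance and serializes model
# loading and generation for the Gradio UI.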
class GuiSD:
def __init__(self, stream=True):
self.model = None
self.status_loading = False
self.sleep_loading = 4
self.last_load = datetime.now()
self.inventory = []
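    # Evict the oldest cached models (FIFO) while used disk space exceeds
    # `storage_floor_gb`, but only after at least `required_inventory_for_purge`
    # models have been downloaded.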
def update_storage_models(self, storage_floor_gb=42, required_inventory_for_purge=3):
while get_used_storage_gb() > storage_floor_gb:
if len(self.inventory) < required_inventory_for_purge:
break
removal_candidate = self.inventory.pop(0)
delete_model(removal_candidate)
def update_inventory(self, model_name):
if model_name not in single_file_model_list:
self.inventory = [
m for m in self.inventory if m != model_name
] + [model_name]
print(self.inventory)
def infer_short(self, model, pipe_params, progress=gr.Progress(track_tqdm=True)):
#progress(0, desc="Start inference...")
images, seed, image_list, metadata = model(**pipe_params)
#progress(1, desc="Inference completed.")
if not isinstance(images, list): images = [images]
images = save_images(images, metadata)
        return [(image_path, None) for image_path in images]
def load_new_model(self, model_name, vae_model, task, progress=gr.Progress(track_tqdm=True)):
self.update_storage_models()
        # model_name may be a repo id, a local single-file path, or a download link
vae_model = vae_model if vae_model != "None" else None
model_type = get_model_type(model_name)
dtype_model = torch.bfloat16 if model_type == "FLUX" else torch.float16
if not os.path.exists(model_name):
_ = download_diffuser_repo(
repo_name=model_name,
model_type=model_type,
revision="main",
token=True,
)
self.update_inventory(model_name)
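        # Cooperative wait queue: poll `status_loading` for up to ~34 s
        # (68 iterations x 0.5 s) so concurrent requests don't load models
        # at the same time.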
for i in range(68):
if not self.status_loading:
self.status_loading = True
if i > 0:
time.sleep(self.sleep_loading)
print("Previous model ops...")
break
time.sleep(0.5)
print(f"Waiting queue {i}")
yield "Waiting queue"
self.status_loading = True
yield f"Loading model: {model_name}"
if vae_model == "BakedVAE":
if not os.path.exists(model_name):
vae_model = model_name
else:
vae_model = None
elif vae_model:
vae_type = "SDXL" if "sdxl" in vae_model.lower() else "SD 1.5"
if model_type != vae_type:
gr.Warning(WARNING_MSG_VAE)
print("Loading model...")
try:
start_time = time.time()
if self.model is None:
self.model = Model_Diffusers(
base_model_id=model_name,
task_name=TASK_STABLEPY[task],
vae_model=vae_model,
type_model_precision=dtype_model,
retain_task_model_in_cache=False,
device="cpu",
)
else:
if self.model.base_model_id != model_name:
load_now_time = datetime.now()
elapsed_time = max((load_now_time - self.last_load).total_seconds(), 0)
if elapsed_time <= 8:
print("Waiting for the previous model's time ops...")
time.sleep(8-elapsed_time)
self.model.device = torch.device("cpu")
self.model.load_pipe(
model_name,
task_name=TASK_STABLEPY[task],
vae_model=vae_model,
type_model_precision=dtype_model,
retain_task_model_in_cache=False,
)
end_time = time.time()
self.sleep_loading = max(min(int(end_time - start_time), 10), 4)
except Exception as e:
self.last_load = datetime.now()
self.status_loading = False
self.sleep_loading = 4
raise e
self.last_load = datetime.now()
self.status_loading = False
yield f"Model loaded: {model_name}"
#@spaces.GPU
@torch.inference_mode()
def generate_pipeline(
self,
prompt,
neg_prompt,
num_images,
steps,
cfg,
clip_skip,
seed,
lora1,
lora_scale1,
lora2,
lora_scale2,
lora3,
lora_scale3,
lora4,
lora_scale4,
lora5,
lora_scale5,
sampler,
schedule_type,
schedule_prediction_type,
img_height,
img_width,
model_name,
vae_model,
task,
image_control,
preprocessor_name,
preprocess_resolution,
image_resolution,
style_prompt, # list []
style_json_file,
image_mask,
strength,
low_threshold,
high_threshold,
value_threshold,
distance_threshold,
controlnet_output_scaling_in_unet,
controlnet_start_threshold,
controlnet_stop_threshold,
textual_inversion,
syntax_weights,
upscaler_model_path,
upscaler_increases_size,
esrgan_tile,
esrgan_tile_overlap,
hires_steps,
hires_denoising_strength,
hires_sampler,
hires_prompt,
hires_negative_prompt,
hires_before_adetailer,
hires_after_adetailer,
loop_generation,
leave_progress_bar,
disable_progress_bar,
image_previews,
display_images,
save_generated_images,
filename_pattern,
image_storage_location,
retain_compel_previous_load,
retain_detailfix_model_previous_load,
retain_hires_model_previous_load,
t2i_adapter_preprocessor,
t2i_adapter_conditioning_scale,
t2i_adapter_conditioning_factor,
xformers_memory_efficient_attention,
freeu,
generator_in_cpu,
adetailer_inpaint_only,
adetailer_verbose,
adetailer_sampler,
adetailer_active_a,
prompt_ad_a,
negative_prompt_ad_a,
strength_ad_a,
face_detector_ad_a,
person_detector_ad_a,
hand_detector_ad_a,
mask_dilation_a,
mask_blur_a,
mask_padding_a,
adetailer_active_b,
prompt_ad_b,
negative_prompt_ad_b,
strength_ad_b,
face_detector_ad_b,
person_detector_ad_b,
hand_detector_ad_b,
mask_dilation_b,
mask_blur_b,
mask_padding_b,
retain_task_cache_gui,
image_ip1,
mask_ip1,
model_ip1,
mode_ip1,
scale_ip1,
image_ip2,
mask_ip2,
model_ip2,
mode_ip2,
scale_ip2,
pag_scale,
):
info_state = html_template_message("Navigating latent space...")
yield info_state, gr.update(), gr.update()
vae_model = vae_model if vae_model != "None" else None
loras_list = [lora1, lora2, lora3, lora4, lora5]
vae_msg = f"VAE: {vae_model}" if vae_model else ""
msg_lora = ""
## BEGIN MOD
loras_list = [s if s else "None" for s in loras_list]
global lora_model_list
lora_model_list = get_lora_model_list()
## END MOD
print("Config model:", model_name, vae_model, loras_list)
task = TASK_STABLEPY[task]
params_ip_img = []
params_ip_msk = []
params_ip_model = []
params_ip_mode = []
params_ip_scale = []
all_adapters = [
(image_ip1, mask_ip1, model_ip1, mode_ip1, scale_ip1),
(image_ip2, mask_ip2, model_ip2, mode_ip2, scale_ip2),
]
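        # IP-Adapter inputs are only collected for UNet-based pipelines;
        # pipes that expose a `transformer` attribute (e.g. FLUX) skip them.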
if not hasattr(self.model.pipe, "transformer"):
for imgip, mskip, modelip, modeip, scaleip in all_adapters:
if imgip:
params_ip_img.append(imgip)
if mskip:
params_ip_msk.append(mskip)
params_ip_model.append(modelip)
params_ip_mode.append(modeip)
params_ip_scale.append(scaleip)
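        # Stream an intermediate preview every `concurrency` steps without
        # running the latents through full VAE decoding.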
concurrency = 5
self.model.stream_config(concurrency=concurrency, latent_resize_by=1, vae_decoding=False)
if task != "txt2img" and not image_control:
raise ValueError("No control image found: To use this function, you have to upload an image in 'Image ControlNet/Inpaint/Img2img'")
if task == "inpaint" and not image_mask:
raise ValueError("No mask image found: Specify one in 'Image Mask'")
if upscaler_model_path in UPSCALER_KEYS[:9]:
upscaler_model = upscaler_model_path
else:
directory_upscalers = 'upscalers'
os.makedirs(directory_upscalers, exist_ok=True)
url_upscaler = UPSCALER_DICT_GUI[upscaler_model_path]
if not os.path.exists(f"./upscalers/{url_upscaler.split('/')[-1]}"):
download_things(directory_upscalers, url_upscaler, HF_TOKEN)
upscaler_model = f"./upscalers/{url_upscaler.split('/')[-1]}"
logging.getLogger("ultralytics").setLevel(logging.INFO if adetailer_verbose else logging.ERROR)
adetailer_params_A = {
"face_detector_ad": face_detector_ad_a,
"person_detector_ad": person_detector_ad_a,
"hand_detector_ad": hand_detector_ad_a,
"prompt": prompt_ad_a,
"negative_prompt": negative_prompt_ad_a,
"strength": strength_ad_a,
# "image_list_task" : None,
"mask_dilation": mask_dilation_a,
"mask_blur": mask_blur_a,
"mask_padding": mask_padding_a,
"inpaint_only": adetailer_inpaint_only,
"sampler": adetailer_sampler,
}
adetailer_params_B = {
"face_detector_ad": face_detector_ad_b,
"person_detector_ad": person_detector_ad_b,
"hand_detector_ad": hand_detector_ad_b,
"prompt": prompt_ad_b,
"negative_prompt": negative_prompt_ad_b,
"strength": strength_ad_b,
# "image_list_task" : None,
"mask_dilation": mask_dilation_b,
"mask_blur": mask_blur_b,
"mask_padding": mask_padding_b,
}
pipe_params = {
"prompt": prompt,
"negative_prompt": neg_prompt,
"img_height": img_height,
"img_width": img_width,
"num_images": num_images,
"num_steps": steps,
"guidance_scale": cfg,
"clip_skip": clip_skip,
"pag_scale": float(pag_scale),
"seed": seed,
"image": image_control,
"preprocessor_name": preprocessor_name,
"preprocess_resolution": preprocess_resolution,
"image_resolution": image_resolution,
"style_prompt": style_prompt if style_prompt else "",
"style_json_file": "",
"image_mask": image_mask, # only for Inpaint
"strength": strength, # only for Inpaint or ...
"low_threshold": low_threshold,
"high_threshold": high_threshold,
"value_threshold": value_threshold,
"distance_threshold": distance_threshold,
"lora_A": lora1 if lora1 != "None" else None,
"lora_scale_A": lora_scale1,
"lora_B": lora2 if lora2 != "None" else None,
"lora_scale_B": lora_scale2,
"lora_C": lora3 if lora3 != "None" else None,
"lora_scale_C": lora_scale3,
"lora_D": lora4 if lora4 != "None" else None,
"lora_scale_D": lora_scale4,
"lora_E": lora5 if lora5 != "None" else None,
"lora_scale_E": lora_scale5,
## BEGIN MOD
"textual_inversion": get_embed_list(self.model.class_name) if textual_inversion else [],
## END MOD
"syntax_weights": syntax_weights, # "Classic"
"sampler": sampler,
"schedule_type": schedule_type,
"schedule_prediction_type": schedule_prediction_type,
"xformers_memory_efficient_attention": xformers_memory_efficient_attention,
"gui_active": True,
"loop_generation": loop_generation,
"controlnet_conditioning_scale": float(controlnet_output_scaling_in_unet),
"control_guidance_start": float(controlnet_start_threshold),
"control_guidance_end": float(controlnet_stop_threshold),
"generator_in_cpu": generator_in_cpu,
"FreeU": freeu,
"adetailer_A": adetailer_active_a,
"adetailer_A_params": adetailer_params_A,
"adetailer_B": adetailer_active_b,
"adetailer_B_params": adetailer_params_B,
"leave_progress_bar": leave_progress_bar,
"disable_progress_bar": disable_progress_bar,
"image_previews": image_previews,
"display_images": display_images,
"save_generated_images": save_generated_images,
"filename_pattern": filename_pattern,
"image_storage_location": image_storage_location,
"retain_compel_previous_load": retain_compel_previous_load,
"retain_detailfix_model_previous_load": retain_detailfix_model_previous_load,
"retain_hires_model_previous_load": retain_hires_model_previous_load,
"t2i_adapter_preprocessor": t2i_adapter_preprocessor,
"t2i_adapter_conditioning_scale": float(t2i_adapter_conditioning_scale),
"t2i_adapter_conditioning_factor": float(t2i_adapter_conditioning_factor),
"upscaler_model_path": upscaler_model,
"upscaler_increases_size": upscaler_increases_size,
"esrgan_tile": esrgan_tile,
"esrgan_tile_overlap": esrgan_tile_overlap,
"hires_steps": hires_steps,
"hires_denoising_strength": hires_denoising_strength,
"hires_prompt": hires_prompt,
"hires_negative_prompt": hires_negative_prompt,
"hires_sampler": hires_sampler,
"hires_before_adetailer": hires_before_adetailer,
"hires_after_adetailer": hires_after_adetailer,
"ip_adapter_image": params_ip_img,
"ip_adapter_mask": params_ip_msk,
"ip_adapter_model": params_ip_model,
"ip_adapter_mode": params_ip_mode,
"ip_adapter_scale": params_ip_scale,
}
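        # Move the pipeline to the GPU; transformer-based models (FLUX) get
        # their transformer moved explicitly when any LoRA is active.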
self.model.device = torch.device("cuda:0")
if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * 5:
self.model.pipe.transformer.to(self.model.device)
print("transformer to cuda")
#return self.infer_short(self.model, pipe_params), info_state
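        # Each yield from the model is (images, [seed, image_paths, metadata]);
        # the progress bar advances by `concurrency` steps per yield.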
actual_progress = 0
info_images = gr.update()
for img, [seed, image_path, metadata] in self.model(**pipe_params):
info_state = progress_step_bar(actual_progress, steps)
actual_progress += concurrency
if image_path:
info_images = f"Seeds: {str(seed)}"
if vae_msg:
info_images = info_images + "<br>" + vae_msg
if "Cannot copy out of meta tensor; no data!" in self.model.last_lora_error:
msg_ram = "Unable to process the LoRAs due to high RAM usage; please try again later."
print(msg_ram)
msg_lora += f"<br>{msg_ram}"
for status, lora in zip(self.model.lora_status, self.model.lora_memory):
if status:
msg_lora += f"<br>Loaded: {lora}"
elif status is not None:
msg_lora += f"<br>Error with: {lora}"
if msg_lora:
info_images += msg_lora
info_images = info_images + "<br>" + "GENERATION DATA:<br>" + escape_html(metadata[0]) + "<br>-------<br>"
download_links = "<br>".join(
[
f'<a href="{path.replace("/images/", "/file=/home/user/app/images/")}" download="{os.path.basename(path)}">Download Image {i + 1}</a>'
for i, path in enumerate(image_path)
]
)
if save_generated_images:
info_images += f"<br>{download_links}"
## BEGIN MOD
if not isinstance(img, list): img = [img]
img = save_images(img, metadata)
img = [(i, None) for i in img]
## END MOD
info_state = "COMPLETE"
yield info_state, img, info_images
#return info_state, img, info_images
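# Wrap a generator function in a spaces.GPU decorator whose duration is chosen
# at call time (ZeroGPU allocates the GPU for roughly `duration` seconds).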
def dynamic_gpu_duration(func, duration, *args):
@spaces.GPU(duration=duration)
def wrapped_func():
yield from func(*args)
return wrapped_func()
@spaces.GPU
def dummy_gpu():
return None
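# The last three positional args are appended by the UI: load_lora_cpu,
# verbose_info, and the requested GPU duration. Everything before them is
# forwarded unchanged to GuiSD.generate_pipeline.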
def sd_gen_generate_pipeline(*args):
gpu_duration_arg = int(args[-1]) if args[-1] else 59
verbose_arg = int(args[-2])
load_lora_cpu = args[-3]
generation_args = args[:-3]
lora_list = [
None if item == "None" or item == "" else item # MOD
for item in [args[7], args[9], args[11], args[13], args[15]]
]
lora_status = [None] * 5
msg_load_lora = "Updating LoRAs in GPU..."
if load_lora_cpu:
msg_load_lora = "Updating LoRAs in CPU (Slow but saves GPU usage)..."
if lora_list != sd_gen.model.lora_memory and lora_list != [None] * 5:
yield msg_load_lora, gr.update(), gr.update()
# Load lora in CPU
if load_lora_cpu:
lora_status = sd_gen.model.lora_merge(
lora_A=lora_list[0], lora_scale_A=args[8],
lora_B=lora_list[1], lora_scale_B=args[10],
lora_C=lora_list[2], lora_scale_C=args[12],
lora_D=lora_list[3], lora_scale_D=args[14],
lora_E=lora_list[4], lora_scale_E=args[16],
)
print(lora_status)
sampler_name = args[17]
schedule_type_name = args[18]
_, _, msg_sampler = check_scheduler_compatibility(
sd_gen.model.class_name, sampler_name, schedule_type_name
)
if msg_sampler:
gr.Warning(msg_sampler)
if verbose_arg:
for status, lora in zip(lora_status, lora_list):
if status:
gr.Info(f"LoRA loaded in CPU: {lora}")
elif status is not None:
gr.Warning(f"Failed to load LoRA: {lora}")
if lora_status == [None] * 5 and sd_gen.model.lora_memory != [None] * 5 and load_lora_cpu:
lora_cache_msg = ", ".join(
str(x) for x in sd_gen.model.lora_memory if x is not None
)
gr.Info(f"LoRAs in cache: {lora_cache_msg}")
msg_request = f"Requesting {gpu_duration_arg}s. of GPU time.\nModel: {sd_gen.model.base_model_id}"
if verbose_arg:
gr.Info(msg_request)
print(msg_request)
yield msg_request.replace("\n", "<br>"), gr.update(), gr.update()
start_time = time.time()
# yield from sd_gen.generate_pipeline(*generation_args)
yield from dynamic_gpu_duration(
#return dynamic_gpu_duration(
sd_gen.generate_pipeline,
gpu_duration_arg,
*generation_args,
)
end_time = time.time()
execution_time = end_time - start_time
msg_task_complete = (
f"GPU task complete in: {int(round(execution_time, 0) + 1)} seconds"
)
if verbose_arg:
gr.Info(msg_task_complete)
print(msg_task_complete)
yield msg_task_complete, gr.update(), gr.update()
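# Standalone ESRGAN upscale for the post-processing tab: downloads the
# upscaler weights on first use and carries over the source image's EXIF data.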
@spaces.GPU(duration=15)
def esrgan_upscale(image, upscaler_name, upscaler_size):
if image is None: return None
from stablepy.diffusers_vanilla.utils import save_pil_image_with_metadata
from stablepy import UpscalerESRGAN
exif_image = extract_exif_data(image)
url_upscaler = UPSCALER_DICT_GUI[upscaler_name]
directory_upscalers = 'upscalers'
os.makedirs(directory_upscalers, exist_ok=True)
if not os.path.exists(f"./upscalers/{url_upscaler.split('/')[-1]}"):
download_things(directory_upscalers, url_upscaler, HF_TOKEN)
scaler_beta = UpscalerESRGAN(0, 0)
image_up = scaler_beta.upscale(image, upscaler_size, f"./upscalers/{url_upscaler.split('/')[-1]}")
image_path = save_pil_image_with_metadata(image_up, f'{os.getcwd()}/up_images', exif_image)
return image_path
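# Mark these callables as ZeroGPU-aware so the Spaces runtime doesn't wrap
# them in an extra GPU allocation (assumption about the `spaces` package API).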
dynamic_gpu_duration.zerogpu = True
sd_gen_generate_pipeline.zerogpu = True
sd_gen = GuiSD()
from pathlib import Path
from PIL import Image
import PIL
import numpy as np
import random
import json
import shutil
from modutils import (safe_float, escape_lora_basename, to_lora_key, to_lora_path,
get_local_model_list, get_private_lora_model_lists, get_valid_lora_name, get_state, set_state,
get_valid_lora_path, get_valid_lora_wt, get_lora_info, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL,
normalize_prompt_list, get_civitai_info, search_lora_on_civitai, translate_to_en, get_t2i_model_info, get_civitai_tag, save_image_history)
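# Simplified one-shot text-to-image entry point: translates and augments the
# prompts, loads the model, then drives sd_gen_generate_pipeline with fixed
# defaults and returns only the first generated image.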
#@spaces.GPU
def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
sampler = "Euler", vae = None, translate=True, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
recom_prompt = True, progress=gr.Progress(track_tqdm=True)):
MAX_SEED = np.iinfo(np.int32).max
image_previews = True
load_lora_cpu = False
verbose_info = False
gpu_duration = 59
filename_pattern = "model,seed"
images: list[tuple[PIL.Image.Image, str | None]] = []
progress(0, desc="Preparing...")
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # The integer seed is passed straight to the pipeline (pipe_params["seed"])
    if translate:
        prompt = translate_to_en(prompt)
        negative_prompt = translate_to_en(negative_prompt)
prompt, negative_prompt = insert_model_recom_prompt(prompt, negative_prompt, model_name, recom_prompt)
progress(0.5, desc="Preparing...")
lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt = \
set_prompt_loras(prompt, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt)
lora1 = get_valid_lora_path(lora1)
lora2 = get_valid_lora_path(lora2)
lora3 = get_valid_lora_path(lora3)
lora4 = get_valid_lora_path(lora4)
lora5 = get_valid_lora_path(lora5)
progress(1, desc="Preparation completed. Starting inference...")
progress(0, desc="Loading model...")
for _ in sd_gen.load_new_model(model_name, vae, TASK_MODEL_LIST[0]):
pass
progress(1, desc="Model loaded.")
progress(0, desc="Starting Inference...")
for info_state, stream_images, info_images in sd_gen_generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
        guidance_scale, True, seed, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
lora4, lora4_wt, lora5, lora5_wt, sampler, schedule_type, schedule_prediction_type,
height, width, model_name, vae, TASK_MODEL_LIST[0], None, "Canny", 512, 1024,
None, None, None, 0.35, 100, 200, 0.1, 0.1, 1.0, 0., 1., False, "Classic", None,
1.0, 100, 10, 30, 0.55, "Use same sampler", "", "",
False, True, 1, True, False, image_previews, False, False, filename_pattern, "./images", False, False, False, True, 1, 0.55,
False, False, False, True, False, "Use same sampler", False, "", "", 0.35, True, True, False, 4, 4, 32,
False, "", "", 0.35, True, True, False, 4, 4, 32,
True, None, None, "plus_face", "original", 0.7, None, None, "base", "style", 0.7, 0.0,
load_lora_cpu, verbose_info, gpu_duration
):
images = stream_images if isinstance(stream_images, list) else images
progress(1, desc="Inference completed.")
output_image = images[0][0] if images else None
return output_image
#@spaces.GPU
def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
sampler = "Euler", vae = None, translate = True, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
recom_prompt = True, progress=gr.Progress(track_tqdm=True)):
return gr.update()
infer.zerogpu = True
_infer.zerogpu = True
def pass_result(result):
return result
def get_samplers():
return scheduler_names
def get_vaes():
return vae_model_list
cached_diffusers_model_tupled_list = get_tupled_model_list(load_diffusers_format_model)
def get_diffusers_model_list(state: dict = {}):
show_diffusers_model_list_detail = get_state(state, "show_diffusers_model_list_detail")
if show_diffusers_model_list_detail:
return cached_diffusers_model_tupled_list
else:
return load_diffusers_format_model
def enable_diffusers_model_detail(is_enable: bool = False, model_name: str = "", state: dict = {}):
show_diffusers_model_list_detail = is_enable
new_value = model_name
index = 0
if model_name in set(load_diffusers_format_model):
index = load_diffusers_format_model.index(model_name)
if is_enable:
new_value = cached_diffusers_model_tupled_list[index][1]
else:
new_value = load_diffusers_format_model[index]
set_state(state, "show_diffusers_model_list_detail", show_diffusers_model_list_detail)
return gr.update(value=is_enable), gr.update(value=new_value, choices=get_diffusers_model_list(state)), state
def load_model_prompt_dict():
    model_dict = {}
    try:
        with open('model_dict.json', encoding='utf-8') as f:
            model_dict = json.load(f)
    except Exception:
        pass
    return model_dict
model_prompt_dict = load_model_prompt_dict()
animagine_ps = to_list("masterpiece, best quality, very aesthetic, absurdres")
animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
pony_ps = to_list("score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
pony_nps = to_list("source_pony, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
other_ps = to_list("anime artwork, anime style, studio anime, highly detailed, cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed")
other_nps = to_list("photo, deformed, black and white, realism, disfigured, low contrast, drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly")
default_ps = to_list("highly detailed, masterpiece, best quality, very aesthetic, absurdres")
default_nps = to_list("score_6, score_5, score_4, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
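# Merge a model's recommended prompt/negative-prompt tags (from
# model_dict.json, falling back to the defaults above) into the user prompts,
# removing duplicates.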
def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name: str = "None", model_recom_prompt_enabled = True):
if not model_recom_prompt_enabled or not model_name: return prompt, neg_prompt
prompts = to_list(prompt)
neg_prompts = to_list(neg_prompt)
prompts = list_sub(prompts, animagine_ps + pony_ps + other_ps)
neg_prompts = list_sub(neg_prompts, animagine_nps + pony_nps + other_nps)
last_empty_p = [""] if not prompts and type != "None" else []
last_empty_np = [""] if not neg_prompts and type != "None" else []
ps = []
nps = []
    if model_name in model_prompt_dict:
ps = to_list(model_prompt_dict[model_name]["prompt"])
nps = to_list(model_prompt_dict[model_name]["negative_prompt"])
else:
ps = default_ps
nps = default_nps
prompts = prompts + ps
neg_prompts = neg_prompts + nps
prompt = ", ".join(list_uniq(prompts) + last_empty_p)
neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
return prompt, neg_prompt
private_lora_dict = {}
try:
with open('lora_dict.json', encoding='utf-8') as f:
d = json.load(f)
for k, v in d.items():
private_lora_dict[escape_lora_basename(k)] = v
except Exception:
pass
private_lora_model_list = get_private_lora_model_lists()
loras_dict = {"None": ["", "", "", "", ""], "": ["", "", "", "", ""]} | private_lora_dict.copy()
loras_url_to_path_dict = {} # {"URL to download": "local filepath", ...}
civitai_last_results = {} # {"URL to download": {search results}, ...}
all_lora_list = []
def get_all_lora_list():
global all_lora_list
loras = get_lora_model_list()
all_lora_list = loras.copy()
return loras
def get_all_lora_tupled_list():
global loras_dict
models = get_all_lora_list()
if not models: return []
tupled_list = []
for model in models:
#if not model: continue # to avoid GUI-related bug
basename = Path(model).stem
key = to_lora_key(model)
items = None
        if key in loras_dict:
items = loras_dict.get(key, None)
else:
items = get_civitai_info(model)
            if items is not None:
loras_dict[key] = items
name = basename
value = model
if items and items[2] != "":
if items[1] == "Pony":
name = f"{basename} (for {items[1]}🐴, {items[2]})"
else:
name = f"{basename} (for {items[1]}, {items[2]})"
tupled_list.append((name, value))
return tupled_list
def update_lora_dict(path: str):
global loras_dict
key = to_lora_key(path)
    if key in loras_dict: return
    items = get_civitai_info(path)
    if items is None: return
loras_dict[key] = items
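# Download one or more comma-separated LoRA URLs, sanitize the downloaded
# filenames, and record the URL -> local path mapping; returns the last
# downloaded path.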
def download_lora(dl_urls: str):
global loras_url_to_path_dict
dl_path = ""
before = get_local_model_list(DIRECTORY_LORAS)
urls = []
for url in [url.strip() for url in dl_urls.split(',')]:
local_path = f"{DIRECTORY_LORAS}/{url.split('/')[-1]}"
if not Path(local_path).exists():
download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY)
urls.append(url)
after = get_local_model_list(DIRECTORY_LORAS)
new_files = list_sub(after, before)
i = 0
for file in new_files:
path = Path(file)
if path.exists():
new_path = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
path.resolve().rename(new_path.resolve())
loras_url_to_path_dict[urls[i]] = str(new_path)
update_lora_dict(str(new_path))
dl_path = str(new_path)
i += 1
return dl_path
def copy_lora(path: str, new_path: str):
if path == new_path: return new_path
cpath = Path(path)
npath = Path(new_path)
if cpath.exists():
try:
shutil.copy(str(cpath.resolve()), str(npath.resolve()))
except Exception:
return None
update_lora_dict(str(npath))
return new_path
else:
return None
def download_my_lora(dl_urls: str, lora1: str, lora2: str, lora3: str, lora4: str, lora5: str):
path = download_lora(dl_urls)
if path:
if not lora1 or lora1 == "None":
lora1 = path
elif not lora2 or lora2 == "None":
lora2 = path
elif not lora3 or lora3 == "None":
lora3 = path
elif not lora4 or lora4 == "None":
lora4 = path
elif not lora5 or lora5 == "None":
lora5 = path
choices = get_all_lora_tupled_list()
return gr.update(value=lora1, choices=choices), gr.update(value=lora2, choices=choices), gr.update(value=lora3, choices=choices),\
gr.update(value=lora4, choices=choices), gr.update(value=lora5, choices=choices)
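# Parse `<lora:name:weight>` tags out of the prompt and map them onto the five
# LoRA slots, e.g. "1girl, <lora:some_lora:0.8>" fills the first free slot
# (the tag name here is only illustrative).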
def set_prompt_loras(prompt, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
lora1 = get_valid_lora_name(lora1, model_name)
lora2 = get_valid_lora_name(lora2, model_name)
lora3 = get_valid_lora_name(lora3, model_name)
lora4 = get_valid_lora_name(lora4, model_name)
lora5 = get_valid_lora_name(lora5, model_name)
if not "<lora" in prompt: return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
lora1_wt = get_valid_lora_wt(prompt, lora1, lora1_wt)
lora2_wt = get_valid_lora_wt(prompt, lora2, lora2_wt)
lora3_wt = get_valid_lora_wt(prompt, lora3, lora3_wt)
lora4_wt = get_valid_lora_wt(prompt, lora4, lora4_wt)
lora5_wt = get_valid_lora_wt(prompt, lora5, lora5_wt)
on1, label1, tag1, md1 = get_lora_info(lora1)
on2, label2, tag2, md2 = get_lora_info(lora2)
on3, label3, tag3, md3 = get_lora_info(lora3)
on4, label4, tag4, md4 = get_lora_info(lora4)
on5, label5, tag5, md5 = get_lora_info(lora5)
lora_paths = [lora1, lora2, lora3, lora4, lora5]
prompts = prompt.split(",") if prompt else []
for p in prompts:
p = str(p).strip()
if "<lora" in p:
result = re.findall(r'<lora:(.+?):(.+?)>', p)
if not result: continue
key = result[0][0]
wt = result[0][1]
path = to_lora_path(key)
            if key not in loras_dict or not path:
path = get_valid_lora_name(path)
if not path or path == "None": continue
if path in lora_paths:
continue
elif not on1:
lora1 = path
lora_paths = [lora1, lora2, lora3, lora4, lora5]
lora1_wt = safe_float(wt)
on1 = True
elif not on2:
lora2 = path
lora_paths = [lora1, lora2, lora3, lora4, lora5]
lora2_wt = safe_float(wt)
on2 = True
elif not on3:
lora3 = path
lora_paths = [lora1, lora2, lora3, lora4, lora5]
lora3_wt = safe_float(wt)
on3 = True
elif not on4:
lora4 = path
lora_paths = [lora1, lora2, lora3, lora4, lora5]
lora4_wt = safe_float(wt)
                on4 = True
elif not on5:
lora5 = path
lora_paths = [lora1, lora2, lora3, lora4, lora5]
lora5_wt = safe_float(wt)
on5 = True
return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
def apply_lora_prompt(prompt: str, lora_info: str):
if lora_info == "None": return gr.update(value=prompt)
tags = prompt.split(",") if prompt else []
prompts = normalize_prompt_list(tags)
lora_tag = lora_info.replace("/",",")
lora_tags = lora_tag.split(",") if str(lora_info) != "None" else []
lora_prompts = normalize_prompt_list(lora_tags)
empty = [""]
prompt = ", ".join(list_uniq(prompts + lora_prompts) + empty)
return gr.update(value=prompt)
def update_loras(prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
on1, label1, tag1, md1 = get_lora_info(lora1)
on2, label2, tag2, md2 = get_lora_info(lora2)
on3, label3, tag3, md3 = get_lora_info(lora3)
on4, label4, tag4, md4 = get_lora_info(lora4)
on5, label5, tag5, md5 = get_lora_info(lora5)
lora_paths = [lora1, lora2, lora3, lora4, lora5]
prompts = prompt.split(",") if prompt else []
output_prompts = []
for p in prompts:
p = str(p).strip()
if "<lora" in p:
result = re.findall(r'<lora:(.+?):(.+?)>', p)
if not result: continue
key = result[0][0]
wt = result[0][1]
path = to_lora_path(key)
            if key not in loras_dict or not path: continue
if path in lora_paths:
output_prompts.append(f"<lora:{to_lora_key(path)}:{safe_float(wt):.2f}>")
elif p:
output_prompts.append(p)
lora_prompts = []
if on1: lora_prompts.append(f"<lora:{to_lora_key(lora1)}:{lora1_wt:.2f}>")
if on2: lora_prompts.append(f"<lora:{to_lora_key(lora2)}:{lora2_wt:.2f}>")
if on3: lora_prompts.append(f"<lora:{to_lora_key(lora3)}:{lora3_wt:.2f}>")
if on4: lora_prompts.append(f"<lora:{to_lora_key(lora4)}:{lora4_wt:.2f}>")
if on5: lora_prompts.append(f"<lora:{to_lora_key(lora5)}:{lora5_wt:.2f}>")
output_prompt = ", ".join(list_uniq(output_prompts + lora_prompts + [""]))
choices = get_all_lora_tupled_list()
return gr.update(value=output_prompt), gr.update(value=lora1, choices=choices), gr.update(value=lora1_wt),\
gr.update(value=tag1, label=label1, visible=on1), gr.update(visible=on1), gr.update(value=md1, visible=on1),\
gr.update(value=lora2, choices=choices), gr.update(value=lora2_wt),\
gr.update(value=tag2, label=label2, visible=on2), gr.update(visible=on2), gr.update(value=md2, visible=on2),\
gr.update(value=lora3, choices=choices), gr.update(value=lora3_wt),\
gr.update(value=tag3, label=label3, visible=on3), gr.update(visible=on3), gr.update(value=md3, visible=on3),\
gr.update(value=lora4, choices=choices), gr.update(value=lora4_wt),\
gr.update(value=tag4, label=label4, visible=on4), gr.update(visible=on4), gr.update(value=md4, visible=on4),\
gr.update(value=lora5, choices=choices), gr.update(value=lora5_wt),\
gr.update(value=tag5, label=label5, visible=on5), gr.update(visible=on5), gr.update(value=md5, visible=on5)
def search_civitai_lora(query, base_model=[], sort=CIVITAI_SORT[0], period=CIVITAI_PERIOD[0], tag="", user="", gallery=[]):
global civitai_last_results, civitai_last_choices, civitai_last_gallery
civitai_last_choices = [("", "")]
civitai_last_gallery = []
civitai_last_results = {}
items = search_lora_on_civitai(query, base_model, 100, sort, period, tag, user)
if not items: return gr.update(choices=[("", "")], value="", visible=False),\
gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
civitai_last_results = {}
choices = []
gallery = []
for item in items:
base_model_name = "Pony🐴" if item['base_model'] == "Pony" else item['base_model']
name = f"{item['name']} (for {base_model_name} / By: {item['creator']} / Tags: {', '.join(item['tags'])})"
value = item['dl_url']
choices.append((name, value))
gallery.append((item['img_url'], name))
civitai_last_results[value] = item
if not choices: return gr.update(choices=[("", "")], value="", visible=False),\
gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
civitai_last_choices = choices
civitai_last_gallery = gallery
    result = civitai_last_results.get(choices[0][1], None)
    md = result['md'] if result else ""
return gr.update(choices=choices, value=choices[0][1], visible=True), gr.update(value=md, visible=True),\
gr.update(visible=True), gr.update(visible=True), gr.update(value=gallery)
def update_civitai_selection(evt: gr.SelectData):
try:
selected_index = evt.index
selected = civitai_last_choices[selected_index][1]
return gr.update(value=selected)
except Exception:
return gr.update(visible=True)
def select_civitai_lora(search_result):
if not "http" in search_result: return gr.update(value=""), gr.update(value="None", visible=True)
result = civitai_last_results.get(search_result, "None")
md = result['md'] if result else ""
return gr.update(value=search_result), gr.update(value=md, visible=True)
def search_civitai_lora_json(query, base_model):
results = {}
items = search_lora_on_civitai(query, base_model)
if not items: return gr.update(value=results)
for item in items:
results[item['dl_url']] = item
return gr.update(value=results)
quality_prompt_list = [
{
"name": "None",
"prompt": "",
"negative_prompt": "lowres",
},
{
"name": "Animagine Common",
"prompt": "anime artwork, anime style, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres",
"negative_prompt": "lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
},
{
"name": "Pony Anime Common",
"prompt": "source_anime, score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres",
"negative_prompt": "source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends",
},
{
"name": "Pony Common",
"prompt": "source_anime, score_9, score_8_up, score_7_up",
"negative_prompt": "source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends",
},
{
"name": "Animagine Standard v3.0",
"prompt": "masterpiece, best quality",
"negative_prompt": "lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name",
},
{
"name": "Animagine Standard v3.1",
"prompt": "masterpiece, best quality, very aesthetic, absurdres",
"negative_prompt": "lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
},
{
"name": "Animagine Light v3.1",
"prompt": "(masterpiece), best quality, very aesthetic, perfect face",
"negative_prompt": "(low quality, worst quality:1.2), very displeasing, 3d, watermark, signature, ugly, poorly drawn",
},
{
"name": "Animagine Heavy v3.1",
"prompt": "(masterpiece), (best quality), (ultra-detailed), very aesthetic, illustration, disheveled hair, perfect composition, moist skin, intricate details",
"negative_prompt": "longbody, lowres, bad anatomy, bad hands, missing fingers, pubic hair, extra digit, fewer digits, cropped, worst quality, low quality, very displeasing",
},
]
style_list = [
{
"name": "None",
"prompt": "",
"negative_prompt": "",
},
{
"name": "Cinematic",
"prompt": "cinematic still, emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
"negative_prompt": "cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
},
{
"name": "Photographic",
"prompt": "cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed",
"negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly",
},
{
"name": "Anime",
"prompt": "anime artwork, anime style, vibrant, studio anime, highly detailed",
"negative_prompt": "photo, deformed, black and white, realism, disfigured, low contrast",
},
{
"name": "Manga",
"prompt": "manga style, vibrant, high-energy, detailed, iconic, Japanese comic style",
"negative_prompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, Western comic style",
},
{
"name": "Digital Art",
"prompt": "concept art, digital artwork, illustrative, painterly, matte painting, highly detailed",
"negative_prompt": "photo, photorealistic, realism, ugly",
},
{
"name": "Pixel art",
"prompt": "pixel-art, low-res, blocky, pixel art style, 8-bit graphics",
"negative_prompt": "sloppy, messy, blurry, noisy, highly detailed, ultra textured, photo, realistic",
},
{
"name": "Fantasy art",
"prompt": "ethereal fantasy concept art, magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
"negative_prompt": "photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white",
},
{
"name": "Neonpunk",
"prompt": "neonpunk style, cyberpunk, vaporwave, neon, vibes, vibrant, stunningly beautiful, crisp, detailed, sleek, ultramodern, magenta highlights, dark purple shadows, high contrast, cinematic, ultra detailed, intricate, professional",
"negative_prompt": "painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured",
},
{
"name": "3D Model",
"prompt": "professional 3d model, octane render, highly detailed, volumetric, dramatic lighting",
"negative_prompt": "ugly, deformed, noisy, low poly, blurry, painting",
},
]
preset_styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
preset_quality = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in quality_prompt_list}
def process_style_prompt(prompt: str, neg_prompt: str, styles_key: str = "None", quality_key: str = "None"):
    def to_list(s):
        return [x.strip() for x in s.split(",") if x.strip() != ""]
def list_sub(a, b):
return [e for e in a if e not in b]
def list_uniq(l):
return sorted(set(l), key=l.index)
animagine_ps = to_list("anime artwork, anime style, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres")
animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
pony_ps = to_list("source_anime, score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
pony_nps = to_list("source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
prompts = to_list(prompt)
neg_prompts = to_list(neg_prompt)
all_styles_ps = []
all_styles_nps = []
for d in style_list:
all_styles_ps.extend(to_list(str(d.get("prompt", ""))))
all_styles_nps.extend(to_list(str(d.get("negative_prompt", ""))))
all_quality_ps = []
all_quality_nps = []
for d in quality_prompt_list:
all_quality_ps.extend(to_list(str(d.get("prompt", ""))))
all_quality_nps.extend(to_list(str(d.get("negative_prompt", ""))))
quality_ps = to_list(preset_quality[quality_key][0])
quality_nps = to_list(preset_quality[quality_key][1])
styles_ps = to_list(preset_styles[styles_key][0])
styles_nps = to_list(preset_styles[styles_key][1])
prompts = list_sub(prompts, animagine_ps + pony_ps + all_styles_ps + all_quality_ps)
neg_prompts = list_sub(neg_prompts, animagine_nps + pony_nps + all_styles_nps + all_quality_nps)
last_empty_p = [""] if not prompts and type != "None" and type != "Auto" and styles_key != "None" and quality_key != "None" else []
last_empty_np = [""] if not neg_prompts and type != "None" and type != "Auto" and styles_key != "None" and quality_key != "None" else []
if type == "Animagine":
prompts = prompts + animagine_ps
neg_prompts = neg_prompts + animagine_nps
elif type == "Pony":
prompts = prompts + pony_ps
neg_prompts = neg_prompts + pony_nps
prompts = prompts + styles_ps + quality_ps
neg_prompts = neg_prompts + styles_nps + quality_nps
prompt = ", ".join(list_uniq(prompts) + last_empty_p)
neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
return gr.update(value=prompt), gr.update(value=neg_prompt)
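# Save PIL images as PNGs with the generation parameters embedded in a
# "parameters" text chunk (the A1111-style metadata convention).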
def save_images(images: list[Image.Image], metadatas: list[str]):
from PIL import PngImagePlugin
    try:
        output_images = []
        for i, (image, metadata) in enumerate(zip(images, metadatas)):
            info = PngImagePlugin.PngInfo()
            info.add_text("parameters", metadata)
            savefile = f"image_{i}.png"  # unique name per image so multiple outputs don't overwrite each other
            image.save(savefile, "PNG", pnginfo=info)
            output_images.append(str(Path(savefile).resolve()))
        return output_images
    except Exception as e:
        print(f"Failed to save image file: {e}")
        raise Exception(f"Failed to save image file: {e}") from e