Upload 5 files
Browse files
- app.py +7 -3
- dc.py +100 -41
- env.py +5 -0
- llmdolphin.py +176 -0
- modutils.py +63 -35
app.py
CHANGED
@@ -138,9 +138,13 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
             lora5_copy = gr.Button(value="Copy example to prompt", visible=False)
             lora5_md = gr.Markdown(value="", visible=False)
         with gr.Accordion("From URL", open=True, visible=True):
+            with gr.Row():
+                lora_search_civitai_basemodel = gr.CheckboxGroup(label="Search LoRA for", choices=["Pony", "SD 1.5", "SDXL 1.0", "Flux.1 D", "Flux.1 S"], value=["Pony", "SDXL 1.0"])
+                lora_search_civitai_sort = gr.Radio(label="Sort", choices=["Highest Rated", "Most Downloaded", "Newest"], value="Highest Rated")
+                lora_search_civitai_period = gr.Radio(label="Period", choices=["AllTime", "Year", "Month", "Week", "Day"], value="AllTime")
            with gr.Row():
                lora_search_civitai_query = gr.Textbox(label="Query", placeholder="oomuro sakurako...", lines=1)
-
+                lora_search_civitai_tag = gr.Textbox(label="Tag", lines=1)
                lora_search_civitai_submit = gr.Button("Search on Civitai")
            with gr.Row():
                lora_search_civitai_result = gr.Dropdown(label="Search Results", choices=[("", "")], value="", allow_custom_value=True, visible=False)
@@ -247,9 +251,9 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
     lora5_copy.click(apply_lora_prompt, [prompt, lora5_info], [prompt], queue=False, show_api=False)

     gr.on(
-        triggers=[lora_search_civitai_submit.click, lora_search_civitai_query.submit],
+        triggers=[lora_search_civitai_submit.click, lora_search_civitai_query.submit, lora_search_civitai_tag.submit],
         fn=search_civitai_lora,
-        inputs=[lora_search_civitai_query, lora_search_civitai_basemodel],
+        inputs=[lora_search_civitai_query, lora_search_civitai_basemodel, lora_search_civitai_sort, lora_search_civitai_period, lora_search_civitai_tag],
         outputs=[lora_search_civitai_result, lora_search_civitai_desc, lora_search_civitai_submit, lora_search_civitai_query],
         scroll_to_output=True,
         queue=True,
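Reviewer note: the new controls all feed one handler through gr.on, which binds several events (button click, Enter in either textbox) to a single callback. A minimal self-contained sketch of the pattern, with illustrative component and handler names rather than the ones in app.py:

import gradio as gr

def search(query, tag):
    # Stand-in for search_civitai_lora.
    return f"query={query!r}, tag={tag!r}"

with gr.Blocks() as demo:
    query = gr.Textbox(label="Query")
    tag = gr.Textbox(label="Tag")
    btn = gr.Button("Search")
    out = gr.Textbox(label="Result")
    # One callback, three triggers.
    gr.on(
        triggers=[btn.click, query.submit, tag.submit],
        fn=search,
        inputs=[query, tag],
        outputs=[out],
    )

demo.launch()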
dc.py
CHANGED
@@ -21,12 +21,9 @@ from stablepy import (
     SD15_TASKS,
     SDXL_TASKS,
 )
+import time
 #import urllib.parse
 import gradio as gr
-from PIL import Image
-import IPython.display
-import time, json
-from IPython.utils import capture
 import logging
 logging.getLogger("diffusers").setLevel(logging.ERROR)
 import diffusers
@@ -381,7 +378,7 @@ class GuiSD:
        if vae_model:
            vae_type = "SDXL" if "sdxl" in vae_model.lower() else "SD 1.5"
            if model_type != vae_type:
-                gr.
+                gr.Warning(msg_inc_vae)

        self.model.device = torch.device("cpu")
        dtype_model = torch.bfloat16 if model_type == "FLUX" else torch.float16
@@ -395,7 +392,7 @@ class GuiSD:
        )
        #yield f"Model loaded: {model_name}"

-
+    #@spaces.GPU
    @torch.inference_mode()
    def generate_pipeline(
        self,
@@ -508,7 +505,7 @@ class GuiSD:
        vae_model = vae_model if vae_model != "None" else None
        loras_list = [lora1, lora2, lora3, lora4, lora5]
        vae_msg = f"VAE: {vae_model}" if vae_model else ""
-        msg_lora =
+        msg_lora = ""

        print("Config model:", model_name, vae_model, loras_list)

@@ -679,35 +676,94 @@ class GuiSD:
        return self.infer_short(self.model, pipe_params, progress), info_state
 ## END MOD

-
-
-
-
-            # lora_B=args[9] if args[9] != "None" else None, lora_scale_B=args[10],
-            # lora_C=args[11] if args[11] != "None" else None, lora_scale_C=args[12],
-            # lora_D=args[13] if args[13] != "None" else None, lora_scale_D=args[14],
-            # lora_E=args[15] if args[15] != "None" else None, lora_scale_E=args[16],
-        # )
-        #
-        # print(status_lora)
-        # for status, lora in zip(status_lora, lora_list):
-        #     if status:
-        #         gr.Info(f"LoRA loaded: {lora}")
-        #     elif status is not None:
-        #         gr.Warning(f"Failed to load LoRA: {lora}")
-        # # gr.Info(f"LoRAs in cache: {", ".join(str(x) for x in self.model.lora_memory if x is not None)}")
-
+def dynamic_gpu_duration(func, duration, *args):
+
+    @spaces.GPU(duration=duration)
+    def wrapped_func():
+        yield from func(*args)
+
+    return wrapped_func()
+
+
+@spaces.GPU
+def dummy_gpu():
+    return None
+
+
+def sd_gen_generate_pipeline(*args):
+
+    gpu_duration_arg = int(args[-1]) if args[-1] else 59
+    verbose_arg = int(args[-2])
+    load_lora_cpu = args[-3]
+    generation_args = args[:-3]
+    lora_list = [
+        None if item == "None" or item == "" else item
+        for item in [args[7], args[9], args[11], args[13], args[15]]
+    ]
+    lora_status = [None] * 5
+
+    msg_load_lora = "Updating LoRAs in GPU..."
+    if load_lora_cpu:
+        msg_load_lora = "Updating LoRAs in CPU (Slow but saves GPU usage)..."
+
+    if lora_list != sd_gen.model.lora_memory and lora_list != [None] * 5:
+        yield None, msg_load_lora
+
+    # Load lora in CPU
+    if load_lora_cpu:
+        lora_status = sd_gen.model.lora_merge(
+            lora_A=lora_list[0], lora_scale_A=args[8],
+            lora_B=lora_list[1], lora_scale_B=args[10],
+            lora_C=lora_list[2], lora_scale_C=args[12],
+            lora_D=lora_list[3], lora_scale_D=args[14],
+            lora_E=lora_list[4], lora_scale_E=args[16],
+        )
+        print(lora_status)
+
+    if verbose_arg:
+        for status, lora in zip(lora_status, lora_list):
+            if status:
+                gr.Info(f"LoRA loaded in CPU: {lora}")
+            elif status is not None:
+                gr.Warning(f"Failed to load LoRA: {lora}")
+
+    if lora_status == [None] * 5 and sd_gen.model.lora_memory != [None] * 5 and load_lora_cpu:
+        lora_cache_msg = ", ".join(
+            str(x) for x in sd_gen.model.lora_memory if x is not None
+        )
+        gr.Info(f"LoRAs in cache: {lora_cache_msg}")
+
+    msg_request = f"Requesting {gpu_duration_arg}s. of GPU time"
+    gr.Info(msg_request)
+    print(msg_request)
+
+    # yield from sd_gen.generate_pipeline(*generation_args)
+
+    start_time = time.time()
+
+    yield from dynamic_gpu_duration(
+        sd_gen.generate_pipeline,
+        gpu_duration_arg,
+        *generation_args,
+    )
+
+    end_time = time.time()
+
+    if verbose_arg:
+        execution_time = end_time - start_time
+        msg_task_complete = (
+            f"GPU task complete in: {round(execution_time, 0) + 1} seconds"
+        )
+        gr.Info(msg_task_complete)
+        print(msg_task_complete)
+
+
+dynamic_gpu_duration.zerogpu = True
+sd_gen_generate_pipeline.zerogpu = True

 from pathlib import Path
-import
+from PIL import Image
+import random, json
 from modutils import (safe_float, escape_lora_basename, to_lora_key, to_lora_path,
                       get_local_model_list, get_private_lora_model_lists, get_valid_lora_name,
                       get_valid_lora_path, get_valid_lora_wt, get_lora_info,
@@ -723,6 +779,10 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
    import numpy as np
    MAX_SEED = np.iinfo(np.int32).max

+    load_lora_cpu = False
+    verbose_info = False
+    gpu_duration = 59
+
    images: list[tuple[PIL.Image.Image, str | None]] = []
    info: str = ""
    progress(0, desc="Preparing...")
@@ -739,7 +799,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
    prompt, negative_prompt = insert_model_recom_prompt(prompt, negative_prompt, model_name)
    progress(0.5, desc="Preparing...")
    lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt = \
-        set_prompt_loras(prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt)
+        set_prompt_loras(prompt, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt)
    lora1 = get_valid_lora_path(lora1)
    lora2 = get_valid_lora_path(lora2)
    lora3 = get_valid_lora_path(lora3)
@@ -748,7 +808,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
    progress(1, desc="Preparation completed. Starting inference preparation...")

    sd_gen.load_new_model(model_name, vae, TASK_MODEL_LIST[0], progress)
-    images, info =
+    images, info = sd_gen_generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
        guidance_scale, True, generator, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
        lora4, lora4_wt, lora5, lora5_wt, sampler,
        height, width, model_name, vae, TASK_MODEL_LIST[0], None, "Canny", 512, 1024,
@@ -757,7 +817,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
        False, True, 1, True, False, False, False, False, "./images", False, False, False, True, 1, 0.55,
        False, False, False, True, False, "Use same sampler", False, "", "", 0.35, True, True, False, 4, 4, 32,
        False, "", "", 0.35, True, True, False, 4, 4, 32,
-        True, None, None, "plus_face", "original", 0.7, None, None, "base", "style", 0.7, 0.0, progress
+        True, None, None, "plus_face", "original", 0.7, None, None, "base", "style", 0.7, 0.0, load_lora_cpu, verbose_info, gpu_duration, progress
    )

    progress(1, desc="Inference completed.")
@@ -820,7 +880,7 @@ def get_t2i_model_info(repo_id: str):
        if " " in repo_id or not api.repo_exists(repo_id): return ""
        model = api.model_info(repo_id=repo_id)
    except Exception as e:
-        print(f"Error: Failed to get {repo_id}'s info. ")
+        print(f"Error: Failed to get {repo_id}'s info. {e}")
        return ""
    if model.private or model.gated: return ""
    tags = model.tags
@@ -1013,13 +1073,13 @@ def download_my_lora(dl_urls: str, lora1: str, lora2: str, lora3: str, lora4: st
        gr.update(value=lora4, choices=choices), gr.update(value=lora5, choices=choices)


-def set_prompt_loras(prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
+def set_prompt_loras(prompt, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
    import re
-    lora1 = get_valid_lora_name(lora1)
-    lora2 = get_valid_lora_name(lora2)
-    lora3 = get_valid_lora_name(lora3)
-    lora4 = get_valid_lora_name(lora4)
-    lora5 = get_valid_lora_name(lora5)
+    lora1 = get_valid_lora_name(lora1, model_name)
+    lora2 = get_valid_lora_name(lora2, model_name)
+    lora3 = get_valid_lora_name(lora3, model_name)
+    lora4 = get_valid_lora_name(lora4, model_name)
+    lora5 = get_valid_lora_name(lora5, model_name)
    if not "<lora" in prompt: return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
    lora1_wt = get_valid_lora_wt(prompt, lora1, lora1_wt)
    lora2_wt = get_valid_lora_wt(prompt, lora2, lora2_wt)
@@ -1129,9 +1189,9 @@ def update_loras(prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora
        gr.update(value=tag5, label=label5, visible=on5), gr.update(visible=on5), gr.update(value=md5, visible=on5)


-def search_civitai_lora(query, base_model):
+def search_civitai_lora(query, base_model, sort="Highest Rated", period="AllTime", tag=""):
    global civitai_lora_last_results
-    items = search_lora_on_civitai(query, base_model)
+    items = search_lora_on_civitai(query, base_model, 100, sort, period, tag)
    if not items: return gr.update(choices=[("", "")], value="", visible=False),\
        gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True)
    civitai_lora_last_results = {}
@@ -1324,7 +1384,6 @@ def process_style_prompt(prompt: str, neg_prompt: str, styles_key: str = "None",
    return gr.update(value=prompt), gr.update(value=neg_prompt)


-from PIL import Image
 def save_images(images: list[Image.Image], metadatas: list[str]):
    from PIL import PngImagePlugin
    try:
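Reviewer note: the heart of the dc.py change is applying spaces.GPU at call time instead of import time, so the requested ZeroGPU slot length can come from the UI. A stripped-down sketch of the same trick with a dummy workload (assumes the spaces package on a ZeroGPU Space):

import spaces

def dynamic_gpu_duration(func, duration, *args):
    # Decorating a closure here makes `duration` a runtime value.
    @spaces.GPU(duration=duration)
    def wrapped_func():
        yield from func(*args)
    return wrapped_func()

def fake_pipeline(steps):
    for i in range(steps):
        yield f"step {i}"

# Request a 30-second GPU slot for this call only.
for out in dynamic_gpu_duration(fake_pipeline, 30, 5):
    print(out)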
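Reviewer note: sd_gen_generate_pipeline keeps generate_pipeline's long signature untouched by accepting its three control values as extra trailing positional args and peeling them off. The convention in isolation, with a toy argument list:

def entry(*args):
    # Per the new wrapper: args[-1] is the GPU duration, args[-2] the
    # verbose flag, args[-3] the load-LoRA-on-CPU flag.
    gpu_duration = int(args[-1]) if args[-1] else 59
    verbose = int(args[-2])
    load_lora_cpu = args[-3]
    generation_args = args[:-3]  # everything else is forwarded unchanged
    return generation_args, load_lora_cpu, verbose, gpu_duration

print(entry("prompt", 28, False, 1, 59))
# (('prompt', 28), False, 1, 59)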
env.py
CHANGED
@@ -102,6 +102,11 @@ load_diffusers_format_model = [
     "Raelina/Raemu-Flux",
 ]

+DIFFUSERS_FORMAT_LORAS = [
+    "nerijs/animation2k-flux",
+    "XLabs-AI/flux-RealismLora",
+]
+
 # List all Models for specified user
 HF_MODEL_USER_LIKES = ["votepurchase"] # sorted by number of likes
 HF_MODEL_USER_EX = ["John6666"] # sorted by a special rule
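Reviewer note: unlike the single-file LoRAs handled elsewhere, these are diffusers-format repos that load straight from the Hub by repo id. A hedged illustration; the base checkpoint below is an assumption (FLUX.1-dev is gated and needs an accepted license plus a token), only the LoRA id comes from the list above:

import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
pipe.load_lora_weights("XLabs-AI/flux-RealismLora")  # diffusers-format LoRA by repo id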
llmdolphin.py
CHANGED
@@ -7,6 +7,7 @@ from llama_cpp_agent.chat_history import BasicChatHistory
 from llama_cpp_agent.chat_history.messages import Roles
 from ja_to_danbooru.ja_to_danbooru import jatags_to_danbooru_tags
 import wrapt_timeout_decorator
+from pathlib import Path


 llm_models_dir = "./llm_models"
@@ -19,6 +20,8 @@ llm_models = {
     #"": ["", MessagesFormatterType.PHI_3],
     "mn-12b-lyra-v2a1-q5_k_m.gguf": ["HalleyStarbun/MN-12B-Lyra-v2a1-Q5_K_M-GGUF", MessagesFormatterType.CHATML],
     "L3-8B-Tamamo-v1.i1-Q5_K_M.gguf": ["mradermacher/L3-8B-Tamamo-v1-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "MN-12B-Mag-Mell-Q4_K_M.gguf": ["inflatebot/MN-12B-Mag-Mell-R1-GGUF", MessagesFormatterType.MISTRAL],
+    "Qwen2.5-14B-Instruct-Q4_K_M.gguf": ["bartowski/Qwen2.5-14B-Instruct-GGUF", MessagesFormatterType.OPEN_CHAT],
     "Instant-RP-Noodles-12B-v1.3.Q4_K_M.gguf": ["mradermacher/Instant-RP-Noodles-12B-v1.3-GGUF", MessagesFormatterType.MISTRAL],
     "MN-12B-Lyra-v4-Q4_K_M.gguf": ["bartowski/MN-12B-Lyra-v4-GGUF", MessagesFormatterType.CHATML],
     "Lyra4-Gutenberg-12B.Q4_K_M.gguf": ["mradermacher/Lyra4-Gutenberg-12B-GGUF", MessagesFormatterType.CHATML],
@@ -50,9 +53,108 @@ llm_models = {
     "StarDust-12b-v2.i1-Q5_K_M.gguf": ["mradermacher/StarDust-12b-v2-i1-GGUF", MessagesFormatterType.CHATML],
     "Rocinante-12B-v2c-Q4_K_M.gguf": ["TheDrummer/UnslopNemo-v1-GGUF", MessagesFormatterType.MISTRAL],
     "mn-maghin-12b-q6_k.gguf": ["rityak/MN-Maghin-12B-Q6_K-GGUF", MessagesFormatterType.MISTRAL],
+    "qwen2.5-lumen-14b-q4_k_m.gguf": ["Lambent/Qwen2.5-Lumen-14B-Q4_K_M-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Qwen2.5-14B_Uncensored_Instruct.Q4_K_M.gguf": ["mradermacher/Qwen2.5-14B_Uncensored_Instruct-GGUF", MessagesFormatterType.OPEN_CHAT],
     "Trinas_Nectar-8B-model_stock.i1-Q4_K_M.gguf": ["mradermacher/Trinas_Nectar-8B-model_stock-i1-GGUF", MessagesFormatterType.MISTRAL],
     "ChatWaifu_v1.4.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.4-GGUF", MessagesFormatterType.MISTRAL],
     "ChatWaifu_v1.3.1.Q4_K_M.gguf": ["mradermacher/ChatWaifu_v1.3.1-GGUF", MessagesFormatterType.MISTRAL],
+    "Collaiborator-MEDLLM-Llama-3-8B-v1.i1-Q5_K_M.gguf": ["mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v1-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "Chili_Dog_8B.i1-Q4_K_M.gguf": ["mradermacher/Chili_Dog_8B-i1-GGUF", MessagesFormatterType.CHATML],
+    "astra-v1-12b-q5_k_m.gguf": ["P0x0/Astra-v1-12B-GGUF", MessagesFormatterType.MISTRAL],
+    "Llama-3.1-8B-TitanFusion-Mix.Q4_K_S.gguf": ["mradermacher/Llama-3.1-8B-TitanFusion-Mix-GGUF", MessagesFormatterType.LLAMA_3],
+    "Qwen2.5-7B-Instruct-kowiki-qa-Q5_K_M.gguf": ["teddylee777/Qwen2.5-7B-Instruct-kowiki-qa-gguf", MessagesFormatterType.OPEN_CHAT],
+    "Qwen2.5-7B-Instruct-abliterated-v2.Q5_K_M.gguf": ["mradermacher/Qwen2.5-7B-Instruct-abliterated-v2-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Mistral-Small-Instruct-2409-abliterated.Q4_K_S.gguf": ["mradermacher/Mistral-Small-Instruct-2409-abliterated-GGUF", MessagesFormatterType.MISTRAL],
+    "Qwen2.5-14b-web.Q4_K_S.gguf": ["mradermacher/Qwen2.5-14b-web-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "EVA-Yi-1.5-9B-32K-V1.i1-Q4_K_M.gguf": ["mradermacher/EVA-Yi-1.5-9B-32K-V1-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Eunoia_Vespera-8B-LINEAR.i1-Q5_K_M.gguf": ["mradermacher/Eunoia_Vespera-8B-LINEAR-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "fatgirlmagicv2.i1-Q4_K_M.gguf": ["mradermacher/fatgirlmagicv2-i1-GGUF", MessagesFormatterType.CHATML],
+    "Pullulation-2-9B.Q4_K_S.gguf": ["mradermacher/Pullulation-2-9B-GGUF", MessagesFormatterType.ALPACA],
+    "Neithabet-9G.Q4_K_M.gguf": ["mradermacher/Neithabet-9G-GGUF", MessagesFormatterType.ALPACA],
+    "magicfatgirlv2.Q4_K_M.gguf": ["mradermacher/magicfatgirlv2-GGUF", MessagesFormatterType.CHATML],
+    "Llama-3.1-8B-TitanFusion-v3.Q5_K_M.gguf": ["mradermacher/Llama-3.1-8B-TitanFusion-v3-GGUF", MessagesFormatterType.LLAMA_3],
+    "Fatgirl_v2_8B.Q5_K_M.gguf": ["mradermacher/Fatgirl_v2_8B-GGUF", MessagesFormatterType.CHATML],
+    "wip-test_pending_4-q4_k_m.gguf": ["DreadPoor/WIP-TEST_PENDING_4-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
+    "himeyuri12b-01-Q4_K_M.gguf": ["Elizezen/Himeyuri-v0.1-12B-GGUF", MessagesFormatterType.MISTRAL],
+    "Qwen2.5-14B-Gutenberg-1e-Delta.Q4_K_M.gguf": ["QuantFactory/Qwen2.5-14B-Gutenberg-1e-Delta-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "mn-12b-stellar-veil-q6_k.gguf": ["v000000/MN-12B-Stellar-Veil-Q6_K-GGUF", MessagesFormatterType.MISTRAL],
+    "stock.g2mma-q4_k_m.gguf": ["ClaudioItaly/Stock.G2mma-Q4_K_M-GGUF", MessagesFormatterType.ALPACA],
+    "Athena-gemma-2-9b-it-Philos-KTO.i1-Q4_K_M.gguf": ["mradermacher/Athena-gemma-2-9b-it-Philos-KTO-i1-GGUF", MessagesFormatterType.ALPACA],
+    "Qwen2.5-14B-Gutenberg-Instruct-Slerpeno.Q4_K_M.gguf": ["mradermacher/Qwen2.5-14B-Gutenberg-Instruct-Slerpeno-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "typressai-9b-q4_k_m.gguf": ["ClaudioItaly/TypressAI-9B-Q4_K_M-GGUF", MessagesFormatterType.ALPACA],
+    "josiefied-qwen2.5-7b-instruct-abliterated-v2.Q5_K_M.gguf": ["Isaak-Carter/Josiefied-Qwen2.5-7B-Instruct-abliterated-v2-gguf", MessagesFormatterType.OPEN_CHAT],
+    "josiefied-qwen2.5-7b-instruct-abliterated.Q5_K_M.gguf": ["Isaak-Carter/Josiefied-Qwen2.5-7B-Instruct-abliterated-gguf", MessagesFormatterType.OPEN_CHAT],
+    "Fireball-Llama-3.1-8B-Philos-Reflection-v0.2.i1-Q5_K_M.gguf": ["mradermacher/Fireball-Llama-3.1-8B-Philos-Reflection-v0.2-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "ArliAI-RPMax-Formax-v1.Q5_K_M.gguf": ["mradermacher/ArliAI-RPMax-Formax-v1-GGUF", MessagesFormatterType.LLAMA_3],
+    "Agente-Llama-3.1-Asistant-16bit-v2.Q5_K_M.gguf": ["mradermacher/Agente-Llama-3.1-Asistant-16bit-v2-GGUF", MessagesFormatterType.LLAMA_3],
+    "Magnum-Blackout-Ataraxy-4-9b.Q4_K_M.gguf": ["mradermacher/Magnum-Blackout-Ataraxy-4-9b-GGUF", MessagesFormatterType.ALPACA],
+    "Magnum-Blackout-Ataraxy-5-9b.Q4_K_M.gguf": ["mradermacher/Magnum-Blackout-Ataraxy-5-9b-GGUF", MessagesFormatterType.ALPACA],
+    "Vapor_7B.Q5_K_M.gguf": ["mradermacher/Vapor_7B-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "fusion-guide-12b-0.1.Q4_K_M.gguf": ["mradermacher/fusion-guide-12b-0.1-GGUF", MessagesFormatterType.MISTRAL],
+    "Meta-Llama-3.1-8B-Instruct-HalfAbliterated-TIES.i1-Q4_K_M.gguf": ["mradermacher/Meta-Llama-3.1-8B-Instruct-HalfAbliterated-TIES-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "Qwen2.5-14B_Uncencored-Q4_K_M.gguf": ["bartowski/Qwen2.5-14B_Uncencored-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "L3.1-Niitorm-8B-DPO-t0.0001.i1-Q5_K_M.gguf": ["mradermacher/L3.1-Niitorm-8B-DPO-t0.0001-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "Hermes-ClimateStorm-Sauerkraut-abliterated.i1-Q5_K_M.gguf": ["mradermacher/Hermes-ClimateStorm-Sauerkraut-abliterated-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "qwen2.5-boosted-q5_k_m.gguf": ["ClaudioItaly/Qwen2.5-Boosted-Q5_K_M-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "qwen2.5-14b-q4_k_m.gguf": ["Triangle104/Qwen2.5-14B-Q4_K_M-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Qwen2.5-7B-Instruct-Q4_K_M.gguf": ["bartowski/Qwen2.5-7B-Instruct-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Hermes-Storm-lorablated.i1-Q5_K_M.gguf": ["mradermacher/Hermes-Storm-lorablated-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "Gemma-Ataraxy-Dare-NoBase-9b.i1-Q4_K_M.gguf": ["mradermacher/Gemma-Ataraxy-Dare-NoBase-9b-i1-GGUF", MessagesFormatterType.ALPACA],
+    "Axolotl-Llama-3.1-8B-instruct-finetuned-V3-merged.Q4_K_M.gguf": ["mradermacher/Axolotl-Llama-3.1-8B-instruct-finetuned-V3-merged-GGUF", MessagesFormatterType.LLAMA_3],
+    "Llama-3.1-8B-TitanFusion-v2.i1-Q5_K_M.gguf": ["mradermacher/Llama-3.1-8B-TitanFusion-v2-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "Magnum-Blackout-Ataraxy-2-9b.Q4_K_M.gguf": ["mradermacher/Magnum-Blackout-Ataraxy-2-9b-GGUF", MessagesFormatterType.ALPACA],
+    "Aspire-8B-model_stock.i1-Q5_K_M.gguf": ["mradermacher/Aspire-8B-model_stock-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "L3.1-Celestial-Stone-2x8B-DPO.Q4_K_M.gguf": ["mradermacher/L3.1-Celestial-Stone-2x8B-DPO-GGUF", MessagesFormatterType.LLAMA_3],
+    "L3-Dark-Planet-8B-V2-Eight-Orbs-Of-Power.i1-Q4_K_M.gguf": ["mradermacher/L3-Dark-Planet-8B-V2-Eight-Orbs-Of-Power-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "l3-luna-8b-q5_k_m-imat.gguf": ["Casual-Autopsy/L3-Luna-8B-Q5_K_M-GGUF", MessagesFormatterType.LLAMA_3],
+    "Insanity.i1-Q4_K_M.gguf": ["mradermacher/Insanity-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Rocinante-12B-v2d-Q4_K_M.gguf": ["TheDrummer/UnslopNemo-v2-GGUF", MessagesFormatterType.MISTRAL],
+    "Fatgirl_8B.Q4_K_M.gguf": ["QuantFactory/Fatgirl_8B-GGUF", MessagesFormatterType.MISTRAL],
+    "stormclimate-q4_k_m.gguf": ["MotherEarth/stormclimate-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
+    "InsanityB.Q4_K_M.gguf": ["mradermacher/InsanityB-GGUF", MessagesFormatterType.MISTRAL],
+    "mistral-nemo-gutades-12B.Q4_K_M.gguf": ["mradermacher/mistral-nemo-gutades-12B-GGUF", MessagesFormatterType.MISTRAL],
+    "FourFictionGemma-9.Q5_K_M.gguf": ["mradermacher/FourFictionGemma-9-GGUF", MessagesFormatterType.ALPACA],
+    "blackmagnumataraxy-9b-q6_k.gguf": ["BarBarickoza/BlackMagnumAtaraxy-9B-Q6_K-GGUF", MessagesFormatterType.ALPACA],
+    "fourfastgemma-9-q4_k_m-imat.gguf": ["ClaudioItaly/FourFastGemma-9-Q4_K_M-GGUF", MessagesFormatterType.ALPACA],
+    "ThinkingMistral-gen.i1-Q4_K_M.gguf": ["mradermacher/ThinkingMistral-gen-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "L3.1-Dark-Planet-10.7B-ExxxxxxxxTended-D_AU-Q4_k_m.gguf": ["DavidAU/L3.1-Dark-Planet-10.7B-ExxxxxxxxTended-GGUF", MessagesFormatterType.LLAMA_3],
+    "Gemma-The-Writer-9B-D_AU-Q4_k_m.gguf": ["DavidAU/Gemma-The-Writer-9B-GGUF", MessagesFormatterType.ALPACA],
+    "CleverBoi-Nemo-12B-v2.i1-Q4_K_M.gguf": ["mradermacher/CleverBoi-Nemo-12B-v2-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "ORPO-EdgeRunner-Tactical-7B-GSM8K.Q5_K_M.gguf": ["mradermacher/ORPO-EdgeRunner-Tactical-7B-GSM8K-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "wip-test_pending_c-q4_k_m.gguf": ["DreadPoor/WIP-TEST_PENDING_C-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
+    "L3.1-8B-komorebi-q8_0.gguf": ["crestf411/L3.1-8B-komorebi-gguf", MessagesFormatterType.LLAMA_3],
+    "Canada_Reimbursement_Model.Q5_K_M.gguf": ["mradermacher/Canada_Reimbursement_Model-GGUF", MessagesFormatterType.LLAMA_3],
+    "BuddyGlassNeverSleeps.Q5_K_M.gguf": ["mradermacher/BuddyGlassNeverSleeps-GGUF", MessagesFormatterType.LLAMA_3],
+    "gemma-writer-stock-no-ifable-9b-q6_k.gguf": ["BarBarickoza/Gemma-writer-stock-no-Ifable-9b-Q6_K-GGUF", MessagesFormatterType.ALPACA],
+    "Gemma-Ataraxy-Dare-9b.i1-Q4_K_M.gguf": ["mradermacher/Gemma-Ataraxy-Dare-9b-i1-GGUF", MessagesFormatterType.ALPACA],
+    "MotherEarth-Hermes-8B.i1-Q5_K_M.gguf": ["mradermacher/MotherEarth-Hermes-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "albacus-q5_k_m-imat.gguf": ["ClaudioItaly/Albacus-V2-Imatrix", MessagesFormatterType.MISTRAL],
+    "mn-12b-siskin-test2-q8_0.gguf": ["Nohobby/MN-12B-Siskin-TEST2-Q8_0-GGUF", MessagesFormatterType.MISTRAL],
+    "mn-12b-siskin-test3-q8_0.gguf": ["Nohobby/MN-12B-Siskin-TEST3-Q8_0-GGUF", MessagesFormatterType.MISTRAL],
+    "Violet_Twilight-v0.2.Q4_K_M.gguf": ["Epiculous/Violet_Twilight-v0.2-GGUF", MessagesFormatterType.CHATML],
+    "albacus-q4_k_m-imat.gguf": ["ClaudioItaly/Albacus-Imatrix-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
+    "mn-chinofun-q4_k_m.gguf": ["djuna/MN-Chinofun-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
+    "NarraThinker12B.i1-Q4_K_M.gguf": ["mradermacher/NarraThinker12B-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "IceDrinkNameNotFoundRP-7b-Model_Stock.Q4_K_S.gguf": ["mradermacher/IceDrinkNameNotFoundRP-7b-Model_Stock-GGUF", MessagesFormatterType.MISTRAL],
+    "Hatheno_Max_1.1-ALT-8B-model_stock.i1-Q5_K_M.gguf": ["mradermacher/Hatheno_Max_1.1-ALT-8B-model_stock-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Gluon-8B.i1-Q5_K_M.gguf": ["mradermacher/Gluon-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "mergekit-model_stock-yhrnwcb-q4_k_m.gguf": ["DreadPoor/WIP-TEST_PENDING_A-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
+    "wip-test_pending_b-q4_k_m.gguf": ["DreadPoor/WIP-TEST_PENDING_B-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
+    "Hatheno_Max_1.1-8B-model_stock.i1-Q5_K_M.gguf": ["mradermacher/Hatheno_Max_1.1-8B-model_stock-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "MN-12B-Siskin-v0.2.i1-Q4_K_M.gguf": ["mradermacher/MN-12B-Siskin-v0.2-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Hatheno_Max-8B-model_stock.i1-Q4_K_M.gguf": ["mradermacher/Hatheno_Max-8B-model_stock-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "L3.1-SuperNovabliterated-8B-model_stock.i1-Q4_K_M.gguf": ["mradermacher/L3.1-SuperNovabliterated-8B-model_stock-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "Hatheno_Max-ALT-8B-model_stock.i1-Q5_K_M.gguf": ["mradermacher/Hatheno_Max-ALT-8B-model_stock-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "experiment_x-wip-q4_k_m.gguf": ["DreadPoor/EXPERIMENT_X-WIP-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
+    "narrathinker12b-q4_k_m.gguf": ["ClaudioItaly/NarraThinker12B-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
+    "llama-3.1-8b-matrix-q5_k_m.gguf": ["bunnycore/LLama-3.1-8B-Matrix-Q5_K_M-GGUF", MessagesFormatterType.LLAMA_3],
+    "Barcenas-8b-Cartas.Q5_K_M.gguf": ["mradermacher/Barcenas-8b-Cartas-GGUF", MessagesFormatterType.LLAMA_3],
+    "HannaOpenHermes-2.5-Mistral-7B.Q5_K_M.gguf": ["mradermacher/HannaOpenHermes-2.5-Mistral-7B-GGUF", MessagesFormatterType.MISTRAL],
+    "IceDrinkNameGoesHereRP-7b-Model_Stock.i1-Q4_K_M.gguf": ["mradermacher/IceDrinkNameGoesHereRP-7b-Model_Stock-i1-GGUF", MessagesFormatterType.ALPACA],
+    "Llama-3.1-Literotica-8B.Q4_K_S.gguf": ["mradermacher/Llama-3.1-Literotica-8B-GGUF", MessagesFormatterType.LLAMA_3],
+    "project-12-q4_k_m.gguf": ["ClaudioItaly/Project-12-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
+    "L3.1-Celestial-Stone-2x8B.i1-Q4_K_M.gguf": ["mradermacher/L3.1-Celestial-Stone-2x8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "experiment_y-wip-q4_k_m.gguf": ["DreadPoor/EXPERIMENT_Y-WIP-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
     "L3.1-Vulca-Epith-Bluegrade-v0.2-8B.q8_0.gguf": ["kromquant/L3.1-Vulca-Epith-Bluegrade-v0.2-8B-GGUFs", MessagesFormatterType.LLAMA_3],
     "llama-3.1-8b-omnimatrix-iq4_nl-imat.gguf": ["bunnycore/Llama-3.1-8B-OmniMatrix-IQ4_NL-GGUF", MessagesFormatterType.LLAMA_3],
     "L3.1-Artemis-d-8B.i1-Q5_K_M.gguf": ["mradermacher/L3.1-Artemis-d-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
@@ -481,7 +583,9 @@ llm_models = {
     "Meta-Llama-3.1-8B-Claude-iMat-Q5_K_M.gguf": ["InferenceIllusionist/Meta-Llama-3.1-8B-Claude-iMat-GGUF", MessagesFormatterType.LLAMA_3],
     "Phi-3.1-mini-128k-instruct-Q6_K_L.gguf": ["bartowski/Phi-3.1-mini-128k-instruct-GGUF", MessagesFormatterType.PHI_3],
     "tifa-7b-qwen2-v0.1.q4_k_m.gguf": ["Tifa-RP/Tifa-7B-Qwen2-v0.1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Holland-Magnum-Merge-R2.i1-Q5_K_M.gguf": ["mradermacher/Holland-Magnum-Merge-R2-i1-GGUF", MessagesFormatterType.LLAMA_3],
     "Oumuamua-7b-RP_Q5_K_M.gguf": ["Aratako/Oumuamua-7b-RP-GGUF", MessagesFormatterType.MISTRAL],
+    "Berghof-NSFW-7B.Q5_K_M.gguf": ["QuantFactory/Berghof-NSFW-7B-GGUF", MessagesFormatterType.MISTRAL],
     "Japanese-TextGen-Kage-v0.1.2-2x7B-NSFW_iMat_Ch200_IQ4_XS.gguf": ["dddump/Japanese-TextGen-Kage-v0.1.2-2x7B-NSFW-gguf", MessagesFormatterType.VICUNA],
     "ChatWaifu_v1.2.1.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.2.1-GGUF", MessagesFormatterType.MISTRAL],
     "ChatWaifu_v1.1.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.1-GGUF", MessagesFormatterType.MISTRAL],
@@ -1087,3 +1191,75 @@ def dolphin_parse_simple(
     else:
         prompts = list_uniq(to_list(raw_prompt) + ["nsfw", "explicit", "rating_explicit"])
     return ", ".join(prompts)
+
+
+# https://huggingface.co/spaces/CaioXapelaum/GGUF-Playground
+import cv2
+cv2.setNumThreads(1)
+
+@spaces.GPU()
+def respond_playground(
+    message,
+    history: list[tuple[str, str]],
+    model,
+    system_message,
+    max_tokens,
+    temperature,
+    top_p,
+    top_k,
+    repeat_penalty,
+):
+    if override_llm_format:
+        chat_template = override_llm_format
+    else:
+        chat_template = llm_models[model][1]
+
+    llm = Llama(
+        model_path=str(Path(f"{llm_models_dir}/{model}")),
+        flash_attn=True,
+        n_gpu_layers=81, # 81
+        n_batch=1024,
+        n_ctx=8192, #8192
+    )
+    provider = LlamaCppPythonProvider(llm)
+
+    agent = LlamaCppAgent(
+        provider,
+        system_prompt=f"{system_message}",
+        predefined_messages_formatter_type=chat_template,
+        debug_output=False
+    )
+
+    settings = provider.get_provider_default_settings()
+    settings.temperature = temperature
+    settings.top_k = top_k
+    settings.top_p = top_p
+    settings.max_tokens = max_tokens
+    settings.repeat_penalty = repeat_penalty
+    settings.stream = True
+
+    messages = BasicChatHistory()
+
+    # Add user and assistant messages to the history
+    for msn in history:
+        user = {'role': Roles.user, 'content': msn[0]}
+        assistant = {'role': Roles.assistant, 'content': msn[1]}
+        messages.add_message(user)
+        messages.add_message(assistant)
+
+    # Stream the response
+    try:
+        stream = agent.get_chat_response(
+            message,
+            llm_sampling_settings=settings,
+            chat_history=messages,
+            returns_streaming_generator=True,
+            print_output=False
+        )
+
+        outputs = ""
+        for output in stream:
+            outputs += output
+            yield outputs
+    except Exception as e:
+        yield f"Error during response generation: {str(e)}"
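Reviewer note: respond_playground follows gr.ChatInterface's calling convention (message, history, then one value per additional input) and yields a growing string, so it streams as-is. A sketch of a possible hookup; the widget choices and ranges are illustrative, not taken from this diff:

import gradio as gr

demo = gr.ChatInterface(
    respond_playground,
    additional_inputs=[
        gr.Dropdown(choices=list(llm_models.keys()), label="Model"),
        gr.Textbox(value="You are a helpful assistant.", label="System message"),
        gr.Slider(1, 4096, value=2048, step=1, label="Max tokens"),
        gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p"),
        gr.Slider(0, 100, value=40, step=1, label="Top-k"),
        gr.Slider(0.0, 2.0, value=1.1, step=0.1, label="Repeat penalty"),
    ],
)
demo.launch()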
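Reviewer note: each llm_models entry maps a local GGUF filename to its Hub repo and prompt-format enum, which is what lets respond_playground build a path from llm_models_dir and pick a formatter. A hypothetical fetch-on-demand helper built on that layout (ensure_model is not in this diff):

from pathlib import Path
from huggingface_hub import hf_hub_download

def ensure_model(filename: str) -> str:
    # llm_models: filename -> [repo_id, MessagesFormatterType]
    repo_id = llm_models[filename][0]
    local = Path(llm_models_dir) / filename
    if not local.exists():
        hf_hub_download(repo_id=repo_id, filename=filename, local_dir=llm_models_dir)
    return str(local)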
modutils.py
CHANGED
@@ -4,13 +4,21 @@ import gradio as gr
 from huggingface_hub import HfApi
 import os
 from pathlib import Path
+from PIL import Image


 from env import (HF_LORA_PRIVATE_REPOS1, HF_LORA_PRIVATE_REPOS2,
-                 HF_MODEL_USER_EX, HF_MODEL_USER_LIKES,
+                 HF_MODEL_USER_EX, HF_MODEL_USER_LIKES, DIFFUSERS_FORMAT_LORAS,
                  directory_loras, hf_read_token, HF_TOKEN, CIVITAI_API_KEY)


+MODEL_TYPE_DICT = {
+    "diffusers:StableDiffusionPipeline": "SD 1.5",
+    "diffusers:StableDiffusionXLPipeline": "SDXL",
+    "diffusers:FluxPipeline": "FLUX",
+}
+
+
 def get_user_agent():
     return 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'

@@ -27,6 +35,11 @@ def list_sub(a, b):
     return [e for e in a if e not in b]


+def is_repo_name(s):
+    import re
+    return re.fullmatch(r'^[^/]+?/[^/]+?$', s)
+
+
 from translatepy import Translator
 translator = Translator()
 def translate_to_en(input: str):
@@ -64,7 +77,7 @@ def download_things(directory, url, hf_token="", civitai_api_key=""):
        if hf_token:
            os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
        else:
-            os.system
+            os.system(f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
    elif "civitai.com" in url:
        if "?" in url:
            url = url.split("?")[0]
@@ -100,7 +113,6 @@ def safe_float(input):
     return output


-from PIL import Image
 def save_images(images: list[Image.Image], metadatas: list[str]):
     from PIL import PngImagePlugin
     import uuid
@@ -245,10 +257,10 @@ model_id_list = get_model_id_list()


 def get_t2i_model_info(repo_id: str):
-    api = HfApi()
+    api = HfApi(token=HF_TOKEN)
     try:
-        if
-        model = api.model_info(repo_id=repo_id)
+        if not is_repo_name(repo_id): return ""
+        model = api.model_info(repo_id=repo_id, timeout=5.0)
     except Exception as e:
         print(f"Error: Failed to get {repo_id}'s info.")
         print(e)
@@ -258,9 +270,8 @@ def get_t2i_model_info(repo_id: str):
     info = []
     url = f"https://huggingface.co/{repo_id}/"
     if not 'diffusers' in tags: return ""
-
-
-    elif 'diffusers:StableDiffusionPipeline' in tags: info.append("SD1.5")
+    for k, v in MODEL_TYPE_DICT.items():
+        if k in tags: info.append(v)
     if model.card_data and model.card_data.tags:
         info.extend(list_sub(model.card_data.tags, ['text-to-image', 'stable-diffusion', 'stable-diffusion-api', 'safetensors', 'stable-diffusion-xl']))
     info.append(f"DLs: {model.downloads}")
@@ -285,12 +296,8 @@ def get_tupled_model_list(model_list):
        tags = model.tags
        info = []
        if not 'diffusers' in tags: continue
-
-        info.append(
-        if 'diffusers:StableDiffusionXLPipeline' in tags:
-            info.append("SDXL")
-        elif 'diffusers:StableDiffusionPipeline' in tags:
-            info.append("SD1.5")
+        for k, v in MODEL_TYPE_DICT.items():
+            if k in tags: info.append(v)
        if model.card_data and model.card_data.tags:
            info.extend(list_sub(model.card_data.tags, ['text-to-image', 'stable-diffusion', 'stable-diffusion-api', 'safetensors', 'stable-diffusion-xl']))
        if "pony" in info:
@@ -374,7 +381,7 @@ def get_civitai_info(path):


 def get_lora_model_list():
-    loras = list_uniq(get_private_lora_model_lists() + get_local_model_list(directory_loras))
+    loras = list_uniq(get_private_lora_model_lists() + get_local_model_list(directory_loras) + DIFFUSERS_FORMAT_LORAS)
     loras.insert(0, "None")
     loras.insert(0, "")
     return loras
@@ -483,7 +490,7 @@ def download_my_lora(dl_urls: str, lora1: str, lora2: str, lora3: str, lora4: st
     gr.update(value=lora4, choices=choices), gr.update(value=lora5, choices=choices)


-def get_valid_lora_name(query: str):
+def get_valid_lora_name(query: str, model_name: str):
     path = "None"
     if not query or query == "None": return "None"
     if to_lora_key(query) in loras_dict.keys(): return query
@@ -497,7 +504,7 @@ def get_valid_lora_name(query: str):
        dl_file = download_lora(query)
        if dl_file and Path(dl_file).exists(): return dl_file
    else:
-        dl_file = find_similar_lora(query)
+        dl_file = find_similar_lora(query, model_name)
        if dl_file and Path(dl_file).exists(): return dl_file
    return "None"

@@ -521,14 +528,14 @@ def get_valid_lora_wt(prompt: str, lora_path: str, lora_wt: float):
     return wt


-def set_prompt_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
+def set_prompt_loras(prompt, prompt_syntax, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
     import re
     if not "Classic" in str(prompt_syntax): return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
-    lora1 = get_valid_lora_name(lora1)
-    lora2 = get_valid_lora_name(lora2)
-    lora3 = get_valid_lora_name(lora3)
-    lora4 = get_valid_lora_name(lora4)
-    lora5 = get_valid_lora_name(lora5)
+    lora1 = get_valid_lora_name(lora1, model_name)
+    lora2 = get_valid_lora_name(lora2, model_name)
+    lora3 = get_valid_lora_name(lora3, model_name)
+    lora4 = get_valid_lora_name(lora4, model_name)
+    lora5 = get_valid_lora_name(lora5, model_name)
     if not "<lora" in prompt: return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
     lora1_wt = get_valid_lora_wt(prompt, lora1, lora1_wt)
     lora2_wt = get_valid_lora_wt(prompt, lora2, lora2_wt)
@@ -790,16 +797,17 @@ def get_civitai_info(path):
     return items


-def search_lora_on_civitai(query: str, allow_model: list[str] = ["Pony", "SDXL 1.0"], limit: int = 100):
+def search_lora_on_civitai(query: str, allow_model: list[str] = ["Pony", "SDXL 1.0"], limit: int = 100,
+                           sort: str = "Highest Rated", period: str = "AllTime", tag: str = ""):
     import requests
     from requests.adapters import HTTPAdapter
     from urllib3.util import Retry
-    if not query: return None
     user_agent = get_user_agent()
     headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
     base_url = 'https://civitai.com/api/v1/models'
-    params = {'
-
+    params = {'types': ['LORA'], 'sort': sort, 'period': period, 'limit': limit, 'nsfw': 'true'}
+    if query: params["query"] = query
+    if tag: params["tag"] = tag
     session = requests.Session()
     retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
     session.mount("https://", HTTPAdapter(max_retries=retries))
@@ -828,9 +836,9 @@ def search_lora_on_civitai(query: str, allow_model: list[str] = ["Pony", "SDXL 1
     return items


-def search_civitai_lora(query, base_model):
+def search_civitai_lora(query, base_model, sort="Highest Rated", period="AllTime", tag=""):
     global civitai_lora_last_results
-    items = search_lora_on_civitai(query, base_model)
+    items = search_lora_on_civitai(query, base_model, 100, sort, period, tag)
     if not items: return gr.update(choices=[("", "")], value="", visible=False),\
         gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True)
     civitai_lora_last_results = {}
@@ -856,7 +864,27 @@ def select_civitai_lora(search_result):
     return gr.update(value=search_result), gr.update(value=md, visible=True)


-def find_similar_lora(q: str):
+LORA_BASE_MODEL_DICT = {
+    "diffusers:StableDiffusionPipeline": ["SD 1.5"],
+    "diffusers:StableDiffusionXLPipeline": ["Pony", "SDXL 1.0"],
+    "diffusers:FluxPipeline": ["Flux.1 D", "Flux.1 S"],
+}
+
+
+def get_lora_base_model(model_name: str):
+    api = HfApi(token=HF_TOKEN)
+    default = ["Pony", "SDXL 1.0"]
+    try:
+        model = api.model_info(repo_id=model_name, timeout=5.0)
+        tags = model.tags
+        for tag in tags:
+            if tag in LORA_BASE_MODEL_DICT.keys(): return LORA_BASE_MODEL_DICT.get(tag, default)
+    except Exception:
+        return default
+    return default
+
+
+def find_similar_lora(q: str, model_name: str):
     from rapidfuzz.process import extractOne
     from rapidfuzz.utils import default_process
     query = to_lora_key(q)
@@ -879,7 +907,7 @@ def find_similar_lora(q: str):
     print(f"Finding <lora:{query}:...> on Civitai...")
     civitai_query = Path(query).stem if Path(query).is_file() else query
     civitai_query = civitai_query.replace("_", " ").replace("-", " ")
-    base_model =
+    base_model = get_lora_base_model(model_name)
     items = search_lora_on_civitai(civitai_query, base_model, 1)
     if items:
         item = items[0]
@@ -1241,11 +1269,11 @@ def set_textual_inversion_prompt(textual_inversion_gui, prompt_gui, neg_prompt_g

 def get_model_pipeline(repo_id: str):
     from huggingface_hub import HfApi
-    api = HfApi()
+    api = HfApi(token=HF_TOKEN)
     default = "StableDiffusionPipeline"
     try:
-        if
-        model = api.model_info(repo_id=repo_id)
+        if not is_repo_name(repo_id): return default
+        model = api.model_info(repo_id=repo_id, timeout=5.0)
     except Exception:
         return default
     if model.private or model.gated: return default
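Reviewer note: the new sort/period/tag arguments map one-to-one onto query parameters of Civitai's public /api/v1/models endpoint, and query becomes optional so tag-only searches work. The request the function builds, shown standalone (the tag value is illustrative):

import requests

params = {
    "types": ["LORA"],
    "sort": "Highest Rated",   # or "Most Downloaded", "Newest"
    "period": "AllTime",       # or "Year", "Month", "Week", "Day"
    "limit": 100,
    "nsfw": "true",
    "query": "oomuro sakurako",
    "tag": "character",
}
r = requests.get("https://civitai.com/api/v1/models", params=params, timeout=(3.0, 15))
items = r.json().get("items", [])
print(len(items), "results")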
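Reviewer note: is_repo_name plus HfApi(token=HF_TOKEN) and a 5-second timeout make the metadata helpers fail closed instead of raising on malformed ids. A quick check of what the regex accepts:

import re

def is_repo_name(s):
    # Exactly one "/" separating two non-empty, slash-free segments.
    return re.fullmatch(r'^[^/]+?/[^/]+?$', s)

assert is_repo_name("John6666/some-model")
assert not is_repo_name("no-slash-here")   # not owner/name shaped
assert not is_repo_name("a/b/c")           # too many segments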
|