Upload 12 files
Files changed:
- app.py  +5 -1
- modutils.py  +23 -3
- tagger/tagger.py  +9 -16
app.py
CHANGED
@@ -365,7 +365,6 @@ class GuiSD:
             lora1, lora_scale1, lora2, lora_scale2, lora3, lora_scale3, lora4, lora_scale4, lora5, lora_scale5 = \
                 set_prompt_loras(prompt, syntax_weights, model_name, lora1, lora_scale1, lora2, lora_scale2, lora3,
                                  lora_scale3, lora4, lora_scale4, lora5, lora_scale5)
-            prompt, neg_prompt = insert_model_recom_prompt(prompt, neg_prompt, model_name)
             ## END MOD
 
             print("Config model:", model_name, vae_model, loras_list)
@@ -1422,6 +1421,11 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
     translate_prompt_button.click(translate_prompt, [series_dbt], [series_dbt], queue=False)
 
     generate_button.click(
+        fn=insert_model_recom_prompt,
+        inputs=[prompt_gui, neg_prompt_gui, model_name_gui, recom_prompt_gui],
+        outputs=[prompt_gui, neg_prompt_gui],
+        queue=False,
+    ).success(
         fn=sd_gen.load_new_model,
         inputs=[
             model_name_gui,
modutils.py
CHANGED
@@ -12,6 +12,7 @@ from requests.adapters import HTTPAdapter
 from urllib3.util import Retry
 import urllib.parse
 import pandas as pd
+from typing import Any
 from huggingface_hub import HfApi, HfFolder, hf_hub_download, snapshot_download
 from translatepy import Translator
 from unidecode import unidecode
@@ -52,6 +53,25 @@ def is_repo_name(s):
     return re.fullmatch(r'^[^/]+?/[^/]+?$', s)
 
 
+DEFAULT_STATE = {
+    "show_diffusers_model_list_detail": False,
+}
+
+
+def get_state(state: dict, key: str):
+    if key in state.keys(): return state[key]
+    elif key in DEFAULT_STATE.keys():
+        print(f"State '{key}' not found. Using default value.")
+        return DEFAULT_STATE[key]
+    else:
+        print(f"State '{key}' not found.")
+        return None
+
+
+def set_state(state: dict, key: str, value: Any):
+    state[key] = value
+
+
 translator = Translator()
 def translate_to_en(input: str):
     try:
@@ -753,10 +773,10 @@ def set_prompt_loras(prompt, prompt_syntax, model_name, lora1, lora1_wt, lora2,
         key = result[0][0]
         wt = result[0][1]
         path = to_lora_path(key)
-        if not key in loras_dict.keys() or not path:
+        if not key in loras_dict.keys() or not Path(path).exists():
            path = get_valid_lora_name(path)
            if not path or path == "None": continue
-        if path in lora_paths:
+        if path in lora_paths or key in lora_paths:
            continue
        elif not on1:
            lora1 = path
@@ -777,7 +797,7 @@ def set_prompt_loras(prompt, prompt_syntax, model_name, lora1, lora1_wt, lora2,
            lora4 = path
            lora_paths = [lora1, lora2, lora3, lora4, lora5]
            lora4_wt = safe_float(wt)
-            on4
+            on4 = True
        elif not on5:
            lora5 = path
            lora_paths = [lora1, lora2, lora3, lora4, lora5]
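modutils.py gains a small per-session UI state helper (DEFAULT_STATE, get_state, set_state) and tightens set_prompt_loras: a cached LoRA path is now validated with Path(path).exists() instead of a bare truthiness check, an already-selected LoRA is detected by key as well as by path, and the missing assignment on the fourth slot is fixed ("on4" alone was a no-op, so a fifth LoRA could silently reuse slot 4). The sketch below shows how the state helpers would plausibly be wired to a gr.State component; the actual wiring lives elsewhere in the Space, so the component names and callback here are assumptions.

# Hedged usage sketch for get_state / set_state with a per-session gr.State.
import gradio as gr
from modutils import DEFAULT_STATE, get_state, set_state

def toggle_detail(state: dict):
    # Read the current flag, falling back to DEFAULT_STATE when it is unset.
    detail = get_state(state, "show_diffusers_model_list_detail")
    set_state(state, "show_diffusers_model_list_detail", not detail)
    return state

with gr.Blocks() as demo:
    # Each browser session starts from its own copy of the defaults.
    ui_state = gr.State(value=DEFAULT_STATE.copy())
    toggle_btn = gr.Button("Toggle model list detail")
    toggle_btn.click(toggle_detail, inputs=[ui_state], outputs=[ui_state])

demo.launch()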
tagger/tagger.py
CHANGED
@@ -285,9 +285,6 @@ def convert_tags_to_ja(input_prompt: str = ""):
     return ", ".join(out_tags)
 
 
-enable_auto_recom_prompt = True
-
-
 animagine_ps = to_list("masterpiece, best quality, very aesthetic, absurdres")
 animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
 pony_ps = to_list("score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
@@ -297,7 +294,6 @@ other_nps = to_list("photo, deformed, black and white, realism, disfigured, low
 default_ps = to_list("highly detailed, masterpiece, best quality, very aesthetic, absurdres")
 default_nps = to_list("score_6, score_5, score_4, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
 def insert_recom_prompt(prompt: str = "", neg_prompt: str = "", type: str = "None"):
-    global enable_auto_recom_prompt
     prompts = to_list(prompt)
     neg_prompts = to_list(neg_prompt)
 
@@ -307,16 +303,12 @@ def insert_recom_prompt(prompt: str = "", neg_prompt: str = "", type: str = "Non
     last_empty_p = [""] if not prompts and type != "None" else []
     last_empty_np = [""] if not neg_prompts and type != "None" else []
 
-    if type == "
-
-
-
-
-
-        neg_prompts = neg_prompts + animagine_nps
-    elif type == "Pony":
-        prompts = prompts + pony_ps
-        neg_prompts = neg_prompts + pony_nps
+    if type == "Animagine":
+        prompts = prompts + animagine_ps
+        neg_prompts = neg_prompts + animagine_nps
+    elif type == "Pony":
+        prompts = prompts + pony_ps
+        neg_prompts = neg_prompts + pony_nps
 
     prompt = ", ".join(list_uniq(prompts) + last_empty_p)
     neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
@@ -329,7 +321,7 @@ def load_model_prompt_dict():
     dict = {}
     path = 'model_dict.json' if Path('model_dict.json').exists() else './tagger/model_dict.json'
     try:
-        with open(
+        with open(path, encoding='utf-8') as f:
             dict = json.load(f)
     except Exception:
         pass
@@ -339,7 +331,8 @@ def load_model_prompt_dict():
 model_prompt_dict = load_model_prompt_dict()
 
 
-def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name: str = "None"):
+def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name: str = "None", type = "Auto"):
+    enable_auto_recom_prompt = True if type == "Auto" else False
     if not model_name or not enable_auto_recom_prompt: return prompt, neg_prompt
     prompts = to_list(prompt)
     neg_prompts = to_list(neg_prompt)