John6666 committed on
Commit
76dc483
1 Parent(s): 7fa9150

Upload 24 files

Browse files
README.md CHANGED
@@ -1,12 +1,15 @@
1
- ---
2
- title: Testvp
3
- emoji: 👁
4
- colorFrom: pink
5
- colorTo: gray
6
- sdk: gradio
7
- sdk_version: 4.44.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
1
+ ---
2
+ title: Votepurchase Multiple Model (SD1.5/SDXL Text-to-Image)
3
+ emoji: 🖼🖼️📦
4
+ colorFrom: purple
5
+ colorTo: red
6
+ sdk: gradio
7
+ sdk_version: 4.41.0
8
+ app_file: app.py
9
+ license: mit
10
+ short_description: Text-to-Image
11
+ pinned: true
12
+ hf_oauth: true
13
+ ---
14
+
15
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,377 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import spaces
2
+ import gradio as gr
3
+ import numpy as np
4
+
5
+ # DiffuseCraft
6
+ from dc import (infer, _infer, pass_result, get_diffusers_model_list, get_samplers,
7
+ get_vaes, enable_model_recom_prompt, enable_diffusers_model_detail,
8
+ get_t2i_model_info, get_all_lora_tupled_list, update_loras,
9
+ apply_lora_prompt, download_my_lora, search_civitai_lora,
10
+ select_civitai_lora, search_civitai_lora_json,
11
+ preset_quality, preset_styles, process_style_prompt)
12
+ # Translator
13
+ from llmdolphin import (dolphin_respond_auto, dolphin_parse_simple,
14
+ get_llm_formats, get_dolphin_model_format, get_dolphin_models,
15
+ get_dolphin_model_info, select_dolphin_model, select_dolphin_format, get_dolphin_sysprompt)
16
+ # Tagger
17
+ from tagger.v2 import v2_upsampling_prompt, V2_ALL_MODELS
18
+ from tagger.utils import (gradio_copy_text, gradio_copy_prompt, COPY_ACTION_JS,
19
+ V2_ASPECT_RATIO_OPTIONS, V2_RATING_OPTIONS, V2_LENGTH_OPTIONS, V2_IDENTITY_OPTIONS)
20
+ from tagger.tagger import (predict_tags_wd, convert_danbooru_to_e621_prompt,
21
+ remove_specific_prompt, insert_recom_prompt, compose_prompt_to_copy,
22
+ translate_prompt, select_random_character)
23
+ from tagger.fl2sd3longcap import predict_tags_fl2_sd3
24
def description_ui():
    """Render the static markdown header (credits and model links) for the Tagger tab."""
    gr.Markdown(
        """
## Danbooru Tags Transformer V2 Demo with WD Tagger & SD3 Long Captioner
(Image =>) Prompt => Upsampled longer prompt
- Mod of p1atdev's [Danbooru Tags Transformer V2 Demo](https://huggingface.co/spaces/p1atdev/danbooru-tags-transformer-v2) and [WD Tagger with 🤗 transformers](https://huggingface.co/spaces/p1atdev/wd-tagger-transformers).
- Models: p1atdev's [wd-swinv2-tagger-v3-hf](https://huggingface.co/p1atdev/wd-swinv2-tagger-v3-hf), [dart-v2-moe-sft](https://huggingface.co/p1atdev/dart-v2-moe-sft), [dart-v2-sft](https://huggingface.co/p1atdev/dart-v2-sft)\
, gokaygokay's [Florence-2-SD3-Captioner](https://huggingface.co/gokaygokay/Florence-2-SD3-Captioner)
"""
    )
34
+
35
+
36
# Upper bounds for the generation controls.
MAX_SEED = np.iinfo(np.int32).max  # largest value accepted by the seed slider
MAX_IMAGE_SIZE = 1216              # max width/height offered by the size sliders

# Custom CSS injected into the Gradio app.
# FIX: the original wrote "margin: 0 auto; !important;" — that places
# "!important" outside the declaration (after the semicolon), which is invalid
# CSS, so the "!important" was silently dropped by the browser.  The flag must
# be part of the declaration: "margin: 0 auto !important;".
css = """
#container { margin: 0 auto !important; }
#col-container { margin: 0 auto !important; }
#result { max-width: 520px; max-height: 520px; margin: 0px auto !important; }
.lora { min-width: 480px !important; }
#model-info { text-align: center !important; }
"""
46
+
47
# ---------------------------------------------------------------------------
# UI definition.  Two tabs:
#   1. "Image Generator"  — SD1.5/SDXL txt2img with up to 5 LoRA slots,
#      Civitai LoRA search/download, and optional LLM-based prompt translation.
#   2. "Tags Transformer with Tagger" — image -> tags (WD / Florence-2) ->
#      upsampled prompt (Dart v2), with copy-to-prompt helpers.
# delete_cache=(60, 3600): scan the gradio cache every 60 s, delete files
# older than one hour.
# NOTE(review): component variable names `recom_prompt` and `model_name` are
# reused (shadowed) by Tab 2 below; all Tab-1 event bindings that use them are
# registered *before* the rebinding, so this works, but it is fragile —
# consider renaming the Tab-2 components.
# ---------------------------------------------------------------------------
with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60, 3600)) as demo:
    with gr.Tab("Image Generator"):
        with gr.Column(elem_id="col-container"):
            # --- Prompt and run controls ---------------------------------
            with gr.Row():
                prompt = gr.Text(label="Prompt", show_label=False, lines=1, max_lines=8, placeholder="Enter your prompt", container=False)

            with gr.Row():
                run_button = gr.Button("Run", variant="primary", scale=5)
                run_translate_button = gr.Button("Run with LLM Enhance", variant="secondary", scale=3)
                auto_trans = gr.Checkbox(label="Auto translate to English", value=False, scale=2)

            result = gr.Image(label="Result", elem_id="result", format="png", show_label=False, interactive=False,
                              show_download_button=True, show_share_button=False, container=True)

            # --- Generation settings -------------------------------------
            with gr.Accordion("Advanced Settings", open=False):
                with gr.Row():
                    negative_prompt = gr.Text(label="Negative prompt", lines=1, max_lines=6, placeholder="Enter a negative prompt",
                                              value="(low quality, worst quality:1.2), very displeasing, watermark, signature, ugly")

                with gr.Row():
                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

                with gr.Row():
                    width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024) # 832
                    height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024) # 1216
                    guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=30.0, step=0.1, value=7)
                    num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=100, step=1, value=28)

                with gr.Row():
                    with gr.Column(scale=4):
                        model_name = gr.Dropdown(label="Model", info="You can enter a huggingface model repo_id to want to use.",
                                                 choices=get_diffusers_model_list(), value=get_diffusers_model_list()[0],
                                                 allow_custom_value=True, interactive=True, min_width=320)
                        model_info = gr.Markdown(elem_id="model-info")
                    with gr.Column(scale=1):
                        model_detail = gr.Checkbox(label="Show detail of model in list", value=False)

                with gr.Row():
                    sampler = gr.Dropdown(label="Sampler", choices=get_samplers(), value="Euler a")
                    vae_model = gr.Dropdown(label="VAE Model", choices=get_vaes(), value=get_vaes()[0])

                # --- Up to five identical LoRA slots ---------------------
                # Each slot: dropdown + weight slider, plus an initially
                # hidden example-prompt box revealed by update_loras().
                with gr.Accordion("LoRA", open=True, visible=True):
                    with gr.Row():
                        with gr.Column():
                            with gr.Row():
                                lora1 = gr.Dropdown(label="LoRA 1", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
                                lora1_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 1: weight")
                            with gr.Row():
                                lora1_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
                                lora1_copy = gr.Button(value="Copy example to prompt", visible=False)
                                lora1_md = gr.Markdown(value="", visible=False)
                        with gr.Column():
                            with gr.Row():
                                lora2 = gr.Dropdown(label="LoRA 2", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
                                lora2_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 2: weight")
                            with gr.Row():
                                lora2_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
                                lora2_copy = gr.Button(value="Copy example to prompt", visible=False)
                                lora2_md = gr.Markdown(value="", visible=False)
                        with gr.Column():
                            with gr.Row():
                                lora3 = gr.Dropdown(label="LoRA 3", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
                                lora3_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 3: weight")
                            with gr.Row():
                                lora3_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
                                lora3_copy = gr.Button(value="Copy example to prompt", visible=False)
                                lora3_md = gr.Markdown(value="", visible=False)
                        with gr.Column():
                            with gr.Row():
                                lora4 = gr.Dropdown(label="LoRA 4", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
                                lora4_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 4: weight")
                            with gr.Row():
                                lora4_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
                                lora4_copy = gr.Button(value="Copy example to prompt", visible=False)
                                lora4_md = gr.Markdown(value="", visible=False)
                        with gr.Column():
                            with gr.Row():
                                lora5 = gr.Dropdown(label="LoRA 5", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
                                lora5_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 5: weight")
                            with gr.Row():
                                lora5_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
                                lora5_copy = gr.Button(value="Copy example to prompt", visible=False)
                                lora5_md = gr.Markdown(value="", visible=False)
                    # --- LoRA acquisition: Civitai search or direct URL ---
                    with gr.Accordion("From URL", open=True, visible=True):
                        with gr.Row():
                            lora_search_civitai_query = gr.Textbox(label="Query", placeholder="oomuro sakurako...", lines=1)
                            lora_search_civitai_basemodel = gr.CheckboxGroup(label="Search LoRA for", choices=["Pony", "SD 1.5", "SDXL 1.0"], value=["Pony", "SDXL 1.0"])
                            lora_search_civitai_submit = gr.Button("Search on Civitai")
                        with gr.Row():
                            lora_search_civitai_result = gr.Dropdown(label="Search Results", choices=[("", "")], value="", allow_custom_value=True, visible=False)
                            lora_search_civitai_json = gr.JSON(value={}, visible=False)
                            lora_search_civitai_desc = gr.Markdown(value="", visible=False)
                        lora_download_url = gr.Textbox(label="URL", placeholder="http://...my_lora_url.safetensors", lines=1)
                        lora_download = gr.Button("Get and set LoRA and apply to prompt")

                with gr.Row():
                    quality_selector = gr.Radio(label="Quality Tag Presets", interactive=True, choices=list(preset_quality.keys()), value="None", scale=3)
                    style_selector = gr.Radio(label="Style Presets", interactive=True, choices=list(preset_styles.keys()), value="None", scale=3)
                    recom_prompt = gr.Checkbox(label="Recommended prompt", value=True, scale=1)

                # --- Local-LLM (dolphin) settings used by "Run with LLM Enhance"
                with gr.Accordion("Translation Settings", open=False):
                    chatbot = gr.Chatbot(likeable=False, render_markdown=False, visible=False) # component for auto-translation
                    chat_model = gr.Dropdown(choices=get_dolphin_models(), value=get_dolphin_models()[0][1], allow_custom_value=True, label="Model")
                    chat_model_info = gr.Markdown(value=get_dolphin_model_info(get_dolphin_models()[0][1]), label="Model info")
                    chat_format = gr.Dropdown(choices=get_llm_formats(), value=get_dolphin_model_format(get_dolphin_models()[0][1]), label="Message format")
                    with gr.Row():
                        chat_tokens = gr.Slider(minimum=1, maximum=4096, value=512, step=1, label="Max tokens")
                        chat_temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
                        chat_topp = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p")
                        chat_topk = gr.Slider(minimum=0, maximum=100, value=40, step=1, label="Top-k")
                        chat_rp = gr.Slider(minimum=0.0, maximum=2.0, value=1.1, step=0.1, label="Repetition penalty")
                    chat_sysmsg = gr.Textbox(value=get_dolphin_sysprompt(), label="System message")

            examples = gr.Examples(
                examples = [
                    ["souryuu asuka langley, 1girl, neon genesis evangelion, plugsuit, pilot suit, red bodysuit, sitting, crossing legs, black eye patch, cat hat, throne, symmetrical, looking down, from bottom, looking at viewer, outdoors"],
                    ["sailor moon, magical girl transformation, sparkles and ribbons, soft pastel colors, crescent moon motif, starry night sky background, shoujo manga style"],
                    ["kafuu chino, 1girl, solo"],
                    ["1girl"],
                    ["beautiful sunset"],
                ],
                inputs=[prompt],
            )

            # --- Event wiring: plain generation ---------------------------
            gr.on( #lambda x: None, inputs=None, outputs=result).then(
                triggers=[run_button.click, prompt.submit],
                fn=infer,
                inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
                        guidance_scale, num_inference_steps, model_name,
                        lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
                        sampler, vae_model, auto_trans],
                outputs=[result],
                queue=True,
                show_progress="full",
                show_api=True,
            )

            # --- Event wiring: LLM-enhanced generation --------------------
            # Chain: dummy API stub (_infer) -> LLM rewrites prompt ->
            # parse LLM reply back into `prompt` -> real infer -> clear chat.
            gr.on( #lambda x: None, inputs=None, outputs=result).then(
                triggers=[run_translate_button.click],
                fn=_infer, # dummy fn for api
                inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
                        guidance_scale, num_inference_steps, model_name,
                        lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
                        sampler, vae_model, auto_trans],
                outputs=[result],
                queue=False,
                show_api=True,
                api_name="infer_translate",
            ).success(
                fn=dolphin_respond_auto,
                inputs=[prompt, chatbot],
                outputs=[chatbot],
                queue=True,
                show_progress="full",
                show_api=False,
            ).success(
                fn=dolphin_parse_simple,
                inputs=[prompt, chatbot],
                outputs=[prompt],
                queue=False,
                show_api=False,
            ).success(
                fn=infer,
                # NOTE(review): auto_trans is omitted here (21 inputs vs. 22
                # above) — infer presumably defaults it; confirm in dc.py.
                inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
                        guidance_scale, num_inference_steps, model_name,
                        lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
                        sampler, vae_model],
                outputs=[result],
                queue=True,
                show_progress="full",
                show_api=False,
            ).success(lambda: None, None, chatbot, queue=False, show_api=False)\
            .success(pass_result, [result], [result], queue=False, show_api=False) # dummy fn for api

            # Any LoRA dropdown/weight change rewrites the prompt tags and
            # reveals the per-slot example boxes.
            gr.on(
                triggers=[lora1.change, lora1_wt.change, lora2.change, lora2_wt.change, lora3.change, lora3_wt.change,
                          lora4.change, lora4_wt.change, lora5.change, lora5_wt.change],
                fn=update_loras,
                inputs=[prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt],
                outputs=[prompt, lora1, lora1_wt, lora1_info, lora1_copy, lora1_md,
                         lora2, lora2_wt, lora2_info, lora2_copy, lora2_md, lora3, lora3_wt, lora3_info, lora3_copy, lora3_md,
                         lora4, lora4_wt, lora4_info, lora4_copy, lora4_md, lora5, lora5_wt, lora5_info, lora5_copy, lora5_md],
                queue=False,
                trigger_mode="once",
                show_api=False,
            )
            lora1_copy.click(apply_lora_prompt, [prompt, lora1_info], [prompt], queue=False, show_api=False)
            lora2_copy.click(apply_lora_prompt, [prompt, lora2_info], [prompt], queue=False, show_api=False)
            lora3_copy.click(apply_lora_prompt, [prompt, lora3_info], [prompt], queue=False, show_api=False)
            lora4_copy.click(apply_lora_prompt, [prompt, lora4_info], [prompt], queue=False, show_api=False)
            lora5_copy.click(apply_lora_prompt, [prompt, lora5_info], [prompt], queue=False, show_api=False)

            # --- Civitai search / direct download wiring ------------------
            gr.on(
                triggers=[lora_search_civitai_submit.click, lora_search_civitai_query.submit],
                fn=search_civitai_lora,
                inputs=[lora_search_civitai_query, lora_search_civitai_basemodel],
                outputs=[lora_search_civitai_result, lora_search_civitai_desc, lora_search_civitai_submit, lora_search_civitai_query],
                scroll_to_output=True,
                queue=True,
                show_api=False,
            )
            lora_search_civitai_json.change(search_civitai_lora_json, [lora_search_civitai_query, lora_search_civitai_basemodel], [lora_search_civitai_json], queue=True, show_api=True) # fn for api
            lora_search_civitai_result.change(select_civitai_lora, [lora_search_civitai_result], [lora_download_url, lora_search_civitai_desc], scroll_to_output=True, queue=False, show_api=False)
            gr.on(
                triggers=[lora_download.click, lora_download_url.submit],
                fn=download_my_lora,
                inputs=[lora_download_url,lora1, lora2, lora3, lora4, lora5],
                outputs=[lora1, lora2, lora3, lora4, lora5],
                scroll_to_output=True,
                queue=True,
                show_api=False,
            )

            # --- Preset / model-detail toggles ----------------------------
            recom_prompt.change(enable_model_recom_prompt, [recom_prompt], [recom_prompt], queue=False, show_api=False)
            gr.on(
                triggers=[quality_selector.change, style_selector.change],
                fn=process_style_prompt,
                inputs=[prompt, negative_prompt, style_selector, quality_selector],
                outputs=[prompt, negative_prompt],
                queue=False,
                trigger_mode="once",
            )

            model_detail.change(enable_diffusers_model_detail, [model_detail, model_name], [model_detail, model_name], queue=False, show_api=False)
            model_name.change(get_t2i_model_info, [model_name], [model_info], queue=False, show_api=False)

            # Switching LLM model/format also clears the hidden chat history.
            chat_model.change(select_dolphin_model, [chat_model], [chat_model, chat_format, chat_model_info], queue=True, show_progress="full", show_api=False)\
            .success(lambda: None, None, chatbot, queue=False, show_api=False)
            chat_format.change(select_dolphin_format, [chat_format], [chat_format], queue=False, show_api=False)\
            .success(lambda: None, None, chatbot, queue=False, show_api=False)

    # Tagger
    with gr.Tab("Tags Transformer with Tagger"):
        with gr.Column():
            # --- Image -> tags input group --------------------------------
            with gr.Group():
                input_image = gr.Image(label="Input image", type="pil", sources=["upload", "clipboard"], height=256)
                with gr.Accordion(label="Advanced options", open=False):
                    general_threshold = gr.Slider(label="Threshold", minimum=0.0, maximum=1.0, value=0.3, step=0.01, interactive=True)
                    character_threshold = gr.Slider(label="Character threshold", minimum=0.0, maximum=1.0, value=0.8, step=0.01, interactive=True)
                    input_tag_type = gr.Radio(label="Convert tags to", info="danbooru for Animagine, e621 for Pony.", choices=["danbooru", "e621"], value="danbooru")
                    # NOTE(review): shadows the Tab-1 `recom_prompt` Checkbox.
                    recom_prompt = gr.Radio(label="Insert reccomended prompt", choices=["None", "Animagine", "Pony"], value="None", interactive=True)
                    image_algorithms = gr.CheckboxGroup(["Use WD Tagger", "Use Florence-2-SD3-Long-Captioner"], label="Algorithms", value=["Use WD Tagger"])
                    keep_tags = gr.Radio(label="Remove tags leaving only the following", choices=["body", "dress", "all"], value="all")
                generate_from_image_btn = gr.Button(value="GENERATE TAGS FROM IMAGE", size="lg", variant="primary")
            # --- Manual tag editing group ---------------------------------
            with gr.Group():
                with gr.Row():
                    input_character = gr.Textbox(label="Character tags", placeholder="hatsune miku")
                    input_copyright = gr.Textbox(label="Copyright tags", placeholder="vocaloid")
                    random_character = gr.Button(value="Random character 🎲", size="sm")
                input_general = gr.TextArea(label="General tags", lines=4, placeholder="1girl, ...", value="")
                input_tags_to_copy = gr.Textbox(value="", visible=False)
                with gr.Row():
                    copy_input_btn = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
                    copy_prompt_btn_input = gr.Button(value="Copy to primary prompt", size="sm", interactive=False)
                translate_input_prompt_button = gr.Button(value="Translate prompt to English", size="sm", variant="secondary")
                tag_type = gr.Radio(label="Output tag conversion", info="danbooru for Animagine, e621 for Pony.", choices=["danbooru", "e621"], value="e621", visible=False)
                input_rating = gr.Radio(label="Rating", choices=list(V2_RATING_OPTIONS), value="explicit")
                with gr.Accordion(label="Advanced options", open=False):
                    input_aspect_ratio = gr.Radio(label="Aspect ratio", info="The aspect ratio of the image.", choices=list(V2_ASPECT_RATIO_OPTIONS), value="square")
                    input_length = gr.Radio(label="Length", info="The total length of the tags.", choices=list(V2_LENGTH_OPTIONS), value="very_long")
                    input_identity = gr.Radio(label="Keep identity", info="How strictly to keep the identity of the character or subject. If you specify the detail of subject in the prompt, you should choose `strict`. Otherwise, choose `none` or `lax`. `none` is very creative but sometimes ignores the input prompt.", choices=list(V2_IDENTITY_OPTIONS), value="lax")
                    input_ban_tags = gr.Textbox(label="Ban tags", info="Tags to ban from the output.", placeholder="alternate costumen, ...", value="censored")
                    # NOTE(review): shadows the Tab-1 `model_name` Dropdown;
                    # Tab-1 bindings were registered above, so this is safe
                    # only by ordering.
                    model_name = gr.Dropdown(label="Model", choices=list(V2_ALL_MODELS.keys()), value=list(V2_ALL_MODELS.keys())[0])
                    dummy_np = gr.Textbox(label="Negative prompt", value="", visible=False)
                    recom_animagine = gr.Textbox(label="Animagine reccomended prompt", value="Animagine", visible=False)
                    recom_pony = gr.Textbox(label="Pony reccomended prompt", value="Pony", visible=False)
                generate_btn = gr.Button(value="GENERATE TAGS", size="lg", variant="primary")
            # --- Output groups (danbooru-style and Pony/e621-style) -------
            with gr.Row():
                with gr.Group():
                    output_text = gr.TextArea(label="Output tags", interactive=False, show_copy_button=True)
                    with gr.Row():
                        copy_btn = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
                        copy_prompt_btn = gr.Button(value="Copy to primary prompt", size="sm", interactive=False)
                with gr.Group():
                    output_text_pony = gr.TextArea(label="Output tags (Pony e621 style)", interactive=False, show_copy_button=True)
                    with gr.Row():
                        copy_btn_pony = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
                        copy_prompt_btn_pony = gr.Button(value="Copy to primary prompt", size="sm", interactive=False)

            random_character.click(select_random_character, [input_copyright, input_character], [input_copyright, input_character], queue=False, show_api=False)

            translate_input_prompt_button.click(translate_prompt, [input_general], [input_general], queue=False, show_api=False)
            translate_input_prompt_button.click(translate_prompt, [input_character], [input_character], queue=False, show_api=False)
            translate_input_prompt_button.click(translate_prompt, [input_copyright], [input_copyright], queue=False, show_api=False)

            # Image -> tags pipeline: clear fields, run WD tagger, run
            # Florence-2 captioner, filter, convert, insert recommended tags,
            # then enable the copy button.
            generate_from_image_btn.click(
                lambda: ("", "", ""), None, [input_copyright, input_character, input_general], queue=False, show_api=False,
            ).success(
                predict_tags_wd,
                [input_image, input_general, image_algorithms, general_threshold, character_threshold],
                [input_copyright, input_character, input_general, copy_input_btn],
                show_api=False,
            ).success(
                predict_tags_fl2_sd3, [input_image, input_general, image_algorithms], [input_general], show_api=False,
            ).success(
                remove_specific_prompt, [input_general, keep_tags], [input_general], queue=False, show_api=False,
            ).success(
                convert_danbooru_to_e621_prompt, [input_general, input_tag_type], [input_general], queue=False, show_api=False,
            ).success(
                insert_recom_prompt, [input_general, dummy_np, recom_prompt], [input_general, dummy_np], queue=False, show_api=False,
            ).success(lambda: gr.update(interactive=True), None, [copy_prompt_btn_input], queue=False, show_api=False)
            copy_input_btn.click(compose_prompt_to_copy, [input_character, input_copyright, input_general], [input_tags_to_copy], show_api=False)\
            .success(gradio_copy_text, [input_tags_to_copy], js=COPY_ACTION_JS, show_api=False)
            copy_prompt_btn_input.click(compose_prompt_to_copy, inputs=[input_character, input_copyright, input_general], outputs=[input_tags_to_copy], show_api=False)\
            .success(gradio_copy_prompt, inputs=[input_tags_to_copy], outputs=[prompt], show_api=False)

            # Tags -> upsampled prompt pipeline (Dart v2), producing both a
            # danbooru-style and a Pony/e621-style output.
            generate_btn.click(
                v2_upsampling_prompt,
                [model_name, input_copyright, input_character, input_general,
                 input_rating, input_aspect_ratio, input_length, input_identity, input_ban_tags],
                [output_text],
                show_api=False,
            ).success(
                convert_danbooru_to_e621_prompt, [output_text, tag_type], [output_text_pony], queue=False, show_api=False,
            ).success(
                insert_recom_prompt, [output_text, dummy_np, recom_animagine], [output_text, dummy_np], queue=False, show_api=False,
            ).success(
                insert_recom_prompt, [output_text_pony, dummy_np, recom_pony], [output_text_pony, dummy_np], queue=False, show_api=False,
            ).success(lambda: (gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True)),
                      None, [copy_btn, copy_btn_pony, copy_prompt_btn, copy_prompt_btn_pony], queue=False, show_api=False)
            copy_btn.click(gradio_copy_text, [output_text], js=COPY_ACTION_JS, show_api=False)
            copy_btn_pony.click(gradio_copy_text, [output_text_pony], js=COPY_ACTION_JS, show_api=False)
            copy_prompt_btn.click(gradio_copy_prompt, inputs=[output_text], outputs=[prompt], show_api=False)
            copy_prompt_btn_pony.click(gradio_copy_prompt, inputs=[output_text_pony], outputs=[prompt], show_api=False)

    gr.LoginButton()
    gr.DuplicateButton(value="Duplicate Space for private use (This demo does not work on CPU. Requires GPU Space)")

demo.queue()
demo.launch()
dc.py ADDED
@@ -0,0 +1,1323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import spaces
2
+ import os
3
+ from stablepy import Model_Diffusers
4
+ from stablepy.diffusers_vanilla.model import scheduler_names
5
+ from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
6
+ import torch
7
+ import re
8
+ import shutil
9
+ import random
10
+ from stablepy import (
11
+ CONTROLNET_MODEL_IDS,
12
+ VALID_TASKS,
13
+ T2I_PREPROCESSOR_NAME,
14
+ FLASH_LORA,
15
+ SCHEDULER_CONFIG_MAP,
16
+ scheduler_names,
17
+ IP_ADAPTER_MODELS,
18
+ IP_ADAPTERS_SD,
19
+ IP_ADAPTERS_SDXL,
20
+ REPO_IMAGE_ENCODER,
21
+ ALL_PROMPT_WEIGHT_OPTIONS,
22
+ SD15_TASKS,
23
+ SDXL_TASKS,
24
+ )
25
+ import urllib.parse
26
+ import gradio as gr
27
+ from PIL import Image
28
+ import IPython.display
29
+ import time, json
30
+ from IPython.utils import capture
31
+ import logging
32
+ logging.getLogger("diffusers").setLevel(logging.ERROR)
33
+ import diffusers
34
+ diffusers.utils.logging.set_verbosity(40)
35
+ import warnings
36
+ warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
37
+ warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
38
+ warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
39
+ from stablepy import logger
40
+ logger.setLevel(logging.CRITICAL)
41
+
42
+ from env import (
43
+ hf_token, hf_read_token, # to use only for private repos
44
+ CIVITAI_API_KEY, HF_LORA_PRIVATE_REPOS1, HF_LORA_PRIVATE_REPOS2,
45
+ HF_LORA_ESSENTIAL_PRIVATE_REPO, HF_VAE_PRIVATE_REPO,
46
+ HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO, HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO,
47
+ directory_models, directory_loras, directory_vaes, directory_embeds,
48
+ directory_embeds_sdxl, directory_embeds_positive_sdxl,
49
+ load_diffusers_format_model, download_model_list, download_lora_list,
50
+ download_vae_list, download_embeds)
51
+
52
# Preprocessor choices selectable for each ControlNet task.
# "None" skips preprocessing (the input image is used as the control map
# directly).  Ordering presumably determines the default shown in the UI —
# TODO confirm against the component that consumes this dict.
preprocessor_controlnet = {
    "openpose": [
        "Openpose",
        "None",
    ],
    "scribble": [
        "HED",
        "Pidinet",
        "None",
    ],
    "softedge": [
        "Pidinet",
        "HED",
        "HED safe",
        "Pidinet safe",
        "None",
    ],
    "segmentation": [
        "UPerNet",
        "None",
    ],
    "depth": [
        "DPT",
        "Midas",
        "None",
    ],
    "normalbae": [
        "NormalBae",
        "None",
    ],
    "lineart": [
        "Lineart",
        "Lineart coarse",
        "Lineart (anime)",
        "None",
        "None (anime)",
    ],
    "shuffle": [
        "ContentShuffle",
        "None",
    ],
    "canny": [
        "Canny"
    ],
    "mlsd": [
        "MLSD"
    ],
    "ip2p": [
        "ip2p"
    ],
}
103
+
104
# Map from the human-readable task names shown in the UI to the task ids
# understood by stablepy.  Commented-out entries are intentionally disabled
# (see the inline note about missing step-callback support).
task_stablepy = {
    'txt2img': 'txt2img',
    'img2img': 'img2img',
    'inpaint': 'inpaint',
    # 'canny T2I Adapter': 'sdxl_canny_t2i', # NO HAVE STEP CALLBACK PARAMETERS SO NOT WORKS WITH DIFFUSERS 0.29.0
    # 'sketch T2I Adapter': 'sdxl_sketch_t2i',
    # 'lineart T2I Adapter': 'sdxl_lineart_t2i',
    # 'depth-midas T2I Adapter': 'sdxl_depth-midas_t2i',
    # 'openpose T2I Adapter': 'sdxl_openpose_t2i',
    'openpose ControlNet': 'openpose',
    'canny ControlNet': 'canny',
    'mlsd ControlNet': 'mlsd',
    'scribble ControlNet': 'scribble',
    'softedge ControlNet': 'softedge',
    'segmentation ControlNet': 'segmentation',
    'depth ControlNet': 'depth',
    'normalbae ControlNet': 'normalbae',
    'lineart ControlNet': 'lineart',
    # 'lineart_anime ControlNet': 'lineart_anime',
    'shuffle ControlNet': 'shuffle',
    'ip2p ControlNet': 'ip2p',
    'optical pattern ControlNet': 'pattern',
    'tile realistic': 'sdxl_tile_realistic',
}

# UI-facing task names, in declaration order.
task_model_list = list(task_stablepy.keys())
130
+
131
+
132
def download_things(directory, url, hf_token="", civitai_api_key=""):
    """Download a file from Google Drive, Hugging Face, or Civitai into *directory*.

    Dispatches on the URL's host:
      - drive.google.com : downloaded with ``gdown`` (cwd temporarily changed).
      - huggingface.co   : ``/blob/`` links rewritten to ``/resolve/``; the
        token, when given, is sent as an Authorization header to aria2c.
      - civitai.com      : requires *civitai_api_key*; without it only an
        error message is printed (no exception raised).
      - anything else    : fetched directly with aria2c.

    FIX: the original interpolated the raw URL/directory/token into
    ``os.system`` shell strings — a shell-injection hole, since the URL comes
    from user input.  All interpolated values are now quoted with
    ``shlex.quote``.  Branching and command options are unchanged.

    Returns None; download happens as a side effect (errors from the external
    tools are not propagated).
    """
    import shlex  # local import keeps this security fix self-contained

    url = url.strip()

    if "drive.google.com" in url:
        original_dir = os.getcwd()
        os.chdir(directory)
        os.system(f"gdown --fuzzy {shlex.quote(url)}")
        os.chdir(original_dir)
    elif "huggingface.co" in url:
        url = url.replace("?download=true", "")
        # url = urllib.parse.quote(url, safe=':/')  # fix encoding
        if "/blob/" in url:
            url = url.replace("/blob/", "/resolve/")
        filename = url.split('/')[-1]
        if hf_token:
            # Header quoted as a single shell word (the original wrapped it
            # in literal double quotes for the same reason).
            user_header = shlex.quote(f"Authorization: Bearer {hf_token}")
            os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {shlex.quote(url)} -d {shlex.quote(directory)} -o {shlex.quote(filename)}")
        else:
            os.system(f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {shlex.quote(url)} -d {shlex.quote(directory)} -o {shlex.quote(filename)}")
    elif "civitai.com" in url:
        if "?" in url:
            url = url.split("?")[0]
        if civitai_api_key:
            url = url + f"?token={civitai_api_key}"
            os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {shlex.quote(directory)} {shlex.quote(url)}")
        else:
            print("\033[91mYou need an API key to download Civitai models.\033[0m")
    else:
        os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {shlex.quote(directory)} {shlex.quote(url)}")
160
+
161
+
162
def get_model_list(directory_path):
    """Collect paths of model weight files directly under *directory_path*."""
    valid_extensions = {'.ckpt', '.pt', '.pth', '.safetensors', '.bin'}
    found = []
    for entry in os.listdir(directory_path):
        ext = os.path.splitext(entry)[1]
        if ext not in valid_extensions:
            continue
        full_path = os.path.join(directory_path, entry)
        found.append(full_path)
        print('\033[34mFILE: ' + full_path + '\033[0m')
    return found
174
+
175
+
176
def process_string(input_string):
    """Split an 'owner/name' string; return (name, original string) or None."""
    parts = input_string.split('/')
    if len(parts) != 2:
        return None
    return (parts[1], input_string)
186
+
187
## BEGIN MOD
from modutils import (to_list, list_uniq, list_sub, get_model_id_list, get_tupled_embed_list,
                      get_tupled_model_list, get_lora_model_list, download_private_repo)

# Comma-joined URL strings consumed by the download loops below.
# - **Download Models**
download_model = ", ".join(download_model_list)
# - **Download VAEs**
download_vae = ", ".join(download_vae_list)
# - **Download LoRAs**
download_lora = ", ".join(download_lora_list)

#download_private_repo(HF_LORA_ESSENTIAL_PRIVATE_REPO, directory_loras, True)
download_private_repo(HF_VAE_PRIVATE_REPO, directory_vaes, False)

# Merge locally configured diffusers model ids with ids discovered on the Hub,
# de-duplicated while preserving order.
load_diffusers_format_model = list_uniq(load_diffusers_format_model + get_model_id_list())
## END MOD
203
+
204
# API credentials come from the Space's environment/secrets.
CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
hf_token = os.environ.get("HF_TOKEN")

# Download stuffs
# Each loop skips files that already exist locally.
for url in [url.strip() for url in download_model.split(',')]:
    if not os.path.exists(f"./models/{url.split('/')[-1]}"):
        download_things(directory_models, url, hf_token, CIVITAI_API_KEY)
for url in [url.strip() for url in download_vae.split(',')]:
    if not os.path.exists(f"./vaes/{url.split('/')[-1]}"):
        download_things(directory_vaes, url, hf_token, CIVITAI_API_KEY)
for url in [url.strip() for url in download_lora.split(',')]:
    if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
        download_things(directory_loras, url, hf_token, CIVITAI_API_KEY)

# Download Embeddings
# NOTE(review): "embedings" is spelled this way in the actual directory path —
# presumably directory_embeds matches; do not "fix" without renaming the dir.
for url_embed in download_embeds:
    if not os.path.exists(f"./embedings/{url_embed.split('/')[-1]}"):
        download_things(directory_embeds, url_embed, hf_token, CIVITAI_API_KEY)

# Build list models
embed_list = get_model_list(directory_embeds)
model_list = get_model_list(directory_models)
# Diffusers-format repo ids come first so they are the default choices.
model_list = load_diffusers_format_model + model_list
## BEGIN MOD
lora_model_list = get_lora_model_list()
vae_model_list = get_model_list(directory_vaes)
vae_model_list.insert(0, "None")

#download_private_repo(HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO, directory_embeds_sdxl, False)
#download_private_repo(HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO, directory_embeds_positive_sdxl, False)
# SDXL embeddings: negative and positive collections merged into one list.
embed_sdxl_list = get_model_list(directory_embeds_sdxl) + get_model_list(directory_embeds_positive_sdxl)
235
+
236
def get_embed_list(pipeline_name):
    """Return the tupled embedding list matching the pipeline class name."""
    if pipeline_name == "StableDiffusionXLPipeline":
        source = embed_sdxl_list
    else:
        source = embed_list
    return get_tupled_embed_list(source)
238
+
239
+
240
## END MOD

print('\033[33m🏁 Download and listing of valid models completed.\033[0m')

# Upscaler display name -> source. None/"Lanczos"/"Nearest" are handled without
# a weight file; every other entry is a download URL for an ESRGAN-family model.
upscaler_dict_gui = {
    None : None,
    "Lanczos" : "Lanczos",
    "Nearest" : "Nearest",
    "RealESRGAN_x4plus" : "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
    "RealESRNet_x4plus" : "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth",
    "RealESRGAN_x4plus_anime_6B": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
    "RealESRGAN_x2plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
    "realesr-animevideov3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
    "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
    "realesr-general-wdn-x4v3" : "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
    "4x-UltraSharp" : "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
    "4x_foolhardy_Remacri" : "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
    "Remacri4xExtraSmoother" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
    "AnimeSharp4x" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/AnimeSharp%204x.pth",
    "lollypop" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/lollypop.pth",
    "RealisticRescaler4x" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/RealisticRescaler%204x.pth",
    "NickelbackFS4x" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/NickelbackFS%204x.pth"
}
263
+
264
+
265
def extract_parameters(input_string):
    """Parse an A1111-style generation-info string into a parameter dict.

    Recognizes the 'Negative prompt:' and 'Steps:' separators; anything before
    them becomes 'prompt' / 'neg_prompt'. Remaining 'Key: value' pairs are
    parsed generically.
    """
    parameters = {}
    # Flatten to one line so the split/regex passes see the whole string.
    input_string = input_string.replace("\n", "")

    if not "Negative prompt:" in input_string:
        print("Negative prompt not detected")
        parameters["prompt"] = input_string
        return parameters

    parm = input_string.split("Negative prompt:")
    parameters["prompt"] = parm[0]
    if not "Steps:" in parm[1]:
        print("Steps not detected")
        parameters["neg_prompt"] = parm[1]
        return parameters
    parm = parm[1].split("Steps:")
    parameters["neg_prompt"] = parm[0]
    # Re-attach the marker so the regexes below can anchor on "Steps:".
    input_string = "Steps:" + parm[1]

    # Extracting Steps
    steps_match = re.search(r'Steps: (\d+)', input_string)
    if steps_match:
        parameters['Steps'] = int(steps_match.group(1))

    # Extracting Size
    size_match = re.search(r'Size: (\d+x\d+)', input_string)
    if size_match:
        parameters['Size'] = size_match.group(1)
        width, height = map(int, parameters['Size'].split('x'))
        parameters['width'] = width
        parameters['height'] = height

    # Extracting other parameters
    # NOTE: this generic pass also re-matches 'Steps' and 'Size', so it
    # overwrites parameters['Steps'] with the *string* value parsed above.
    other_parameters = re.findall(r'(\w+): (.*?)(?=, \w+|$)', input_string)
    for param in other_parameters:
        parameters[param[0]] = param[1].strip('"')

    return parameters
303
+
304
+
305
## BEGIN MOD
class GuiSD:
    """Holds a stablepy Model_Diffusers pipeline and drives generation for the GUI."""

    def __init__(self):
        self.model = None

        # Eagerly load a default SDXL pipeline at startup.
        print("Loading model...")
        self.model = Model_Diffusers(
            base_model_id="cagliostrolab/animagine-xl-3.1",
            task_name="txt2img",
            vae_model=None,
            type_model_precision=torch.float16,
            retain_task_model_in_cache=False,
            #device="cpu",
        )
        # Park the pipeline on CPU until generate_pipeline() moves it to CUDA.
        self.model.device = torch.device("cpu") #
320
+
321
+ def infer_short(self, model, pipe_params, progress=gr.Progress(track_tqdm=True)):
322
+ progress(0, desc="Start inference...")
323
+ images, seed, image_list, metadata = model(**pipe_params)
324
+ progress(1, desc="Inference completed.")
325
+ if not isinstance(images, list): images = [images]
326
+ images = save_images(images, metadata)
327
+ img = []
328
+ for image in images:
329
+ img.append((image, None))
330
+ return img
331
+
332
    def load_new_model(self, model_name, vae_model, task, progress=gr.Progress(track_tqdm=True)):
        """Generator that switches the pipeline to *model_name*, yielding status strings.

        NOTE: because this is a generator, nothing executes until it is iterated.
        """

        yield f"Loading model: {model_name}"

        vae_model = vae_model if vae_model != "None" else None

        if model_name in model_list:
            model_is_xl = "xl" in model_name.lower()
            sdxl_in_vae = vae_model and "sdxl" in vae_model.lower()
            model_type = "SDXL" if model_is_xl else "SD 1.5"  # NOTE(review): computed but unused in this method
            incompatible_vae = (model_is_xl and vae_model and not sdxl_in_vae) or (not model_is_xl and sdxl_in_vae)

            # Silently drop a VAE that belongs to the other model family.
            if incompatible_vae:
                vae_model = None

        # Load on CPU; generate_pipeline() moves the pipeline to CUDA later.
        self.model.device = torch.device("cpu")

        self.model.load_pipe(
            model_name,
            task_name=task_stablepy[task],
            vae_model=vae_model if vae_model != "None" else None,
            type_model_precision=torch.float16,
            retain_task_model_in_cache=False,
        )
        yield f"Model loaded: {model_name}"
357
+
358
    @spaces.GPU
    def generate_pipeline(
        self,
        prompt,
        neg_prompt,
        num_images,
        steps,
        cfg,
        clip_skip,
        seed,
        lora1,
        lora_scale1,
        lora2,
        lora_scale2,
        lora3,
        lora_scale3,
        lora4,
        lora_scale4,
        lora5,
        lora_scale5,
        sampler,
        img_height,
        img_width,
        model_name,
        vae_model,
        task,
        image_control,
        preprocessor_name,
        preprocess_resolution,
        image_resolution,
        style_prompt, # list []
        style_json_file,
        image_mask,
        strength,
        low_threshold,
        high_threshold,
        value_threshold,
        distance_threshold,
        controlnet_output_scaling_in_unet,
        controlnet_start_threshold,
        controlnet_stop_threshold,
        textual_inversion,
        syntax_weights,
        upscaler_model_path,
        upscaler_increases_size,
        esrgan_tile,
        esrgan_tile_overlap,
        hires_steps,
        hires_denoising_strength,
        hires_sampler,
        hires_prompt,
        hires_negative_prompt,
        hires_before_adetailer,
        hires_after_adetailer,
        loop_generation,
        leave_progress_bar,
        disable_progress_bar,
        image_previews,
        display_images,
        save_generated_images,
        image_storage_location,
        retain_compel_previous_load,
        retain_detailfix_model_previous_load,
        retain_hires_model_previous_load,
        t2i_adapter_preprocessor,
        t2i_adapter_conditioning_scale,
        t2i_adapter_conditioning_factor,
        xformers_memory_efficient_attention,
        freeu,
        generator_in_cpu,
        adetailer_inpaint_only,
        adetailer_verbose,
        adetailer_sampler,
        adetailer_active_a,
        prompt_ad_a,
        negative_prompt_ad_a,
        strength_ad_a,
        face_detector_ad_a,
        person_detector_ad_a,
        hand_detector_ad_a,
        mask_dilation_a,
        mask_blur_a,
        mask_padding_a,
        adetailer_active_b,
        prompt_ad_b,
        negative_prompt_ad_b,
        strength_ad_b,
        face_detector_ad_b,
        person_detector_ad_b,
        hand_detector_ad_b,
        mask_dilation_b,
        mask_blur_b,
        mask_padding_b,
        retain_task_cache_gui,
        image_ip1,
        mask_ip1,
        model_ip1,
        mode_ip1,
        scale_ip1,
        image_ip2,
        mask_ip2,
        model_ip2,
        mode_ip2,
        scale_ip2,
        progress=gr.Progress(track_tqdm=True),
    ):
        """Validate inputs, (re)load the pipeline, assemble the stablepy
        parameter dict and run one generation via infer_short().

        Returns (list of (image, None) gallery tuples, info string).
        """
        progress(0, desc="Preparing inference...")

        vae_model = vae_model if vae_model != "None" else None
        loras_list = [lora1, lora2, lora3, lora4, lora5]
        vae_msg = f"VAE: {vae_model}" if vae_model else ""
        msg_lora = []

        ## BEGIN MOD
        prompt, neg_prompt = insert_model_recom_prompt(prompt, neg_prompt, model_name)
        global lora_model_list
        lora_model_list = get_lora_model_list()
        ## END MOD

        if model_name in model_list:
            model_is_xl = "xl" in model_name.lower()
            sdxl_in_vae = vae_model and "sdxl" in vae_model.lower()
            model_type = "SDXL" if model_is_xl else "SD 1.5"
            incompatible_vae = (model_is_xl and vae_model and not sdxl_in_vae) or (not model_is_xl and sdxl_in_vae)

            # Warn the user and fall back to the default VAE on a family mismatch.
            if incompatible_vae:
                msg_inc_vae = (
                    f"The selected VAE is for a { 'SD 1.5' if model_is_xl else 'SDXL' } model, but you"
                    f" are using a { model_type } model. The default VAE "
                    "will be used."
                )
                gr.Info(msg_inc_vae)
                vae_msg = msg_inc_vae
                vae_model = None

            # Warn about LoRAs that appear to target the other model family.
            for la in loras_list:
                if la is not None and la != "None" and la in lora_model_list:
                    print(la)
                    lora_type = ("animetarot" in la.lower() or "Hyper-SD15-8steps".lower() in la.lower())
                    if (model_is_xl and lora_type) or (not model_is_xl and not lora_type):
                        msg_inc_lora = f"The LoRA {la} is for { 'SD 1.5' if model_is_xl else 'SDXL' }, but you are using { model_type }."
                        gr.Info(msg_inc_lora)
                        msg_lora.append(msg_inc_lora)

        task = task_stablepy[task]

        params_ip_img = []
        params_ip_msk = []
        params_ip_model = []
        params_ip_mode = []
        params_ip_scale = []

        all_adapters = [
            (image_ip1, mask_ip1, model_ip1, mode_ip1, scale_ip1),
            (image_ip2, mask_ip2, model_ip2, mode_ip2, scale_ip2),
        ]

        # Keep only IP-Adapter slots that actually have an input image.
        for imgip, mskip, modelip, modeip, scaleip in all_adapters:
            if imgip:
                params_ip_img.append(imgip)
                if mskip:
                    params_ip_msk.append(mskip)
                params_ip_model.append(modelip)
                params_ip_mode.append(modeip)
                params_ip_scale.append(scaleip)

        # First load
        self.model.device = torch.device("cuda:0")
        model_precision = torch.float16
        # NOTE(review): self.model is always constructed in __init__, so this
        # branch looks unreachable — confirm before removing.
        if not self.model:
            from stablepy import Model_Diffusers

            print("Loading model...")
            self.model = Model_Diffusers(
                base_model_id=model_name,
                task_name=task,
                vae_model=vae_model if vae_model != "None" else None,
                type_model_precision=model_precision,
                retain_task_model_in_cache=retain_task_cache_gui,
            )

        if task != "txt2img" and not image_control:
            raise ValueError("No control image found: To use this function, you have to upload an image in 'Image ControlNet/Inpaint/Img2img'")

        if task == "inpaint" and not image_mask:
            raise ValueError("No mask image found: Specify one in 'Image Mask'")

        # Resolve the upscaler: built-ins pass through unchanged; otherwise
        # download the weight file once and use its local path.
        if upscaler_model_path in [None, "Lanczos", "Nearest"]:
            upscaler_model = upscaler_model_path
        else:
            directory_upscalers = 'upscalers'
            os.makedirs(directory_upscalers, exist_ok=True)

            url_upscaler = upscaler_dict_gui[upscaler_model_path]

            if not os.path.exists(f"./upscalers/{url_upscaler.split('/')[-1]}"):
                download_things(directory_upscalers, url_upscaler, hf_token)

            upscaler_model = f"./upscalers/{url_upscaler.split('/')[-1]}"

        # ADetailer (ultralytics) logging is noisy unless verbose mode is on.
        logging.getLogger("ultralytics").setLevel(logging.INFO if adetailer_verbose else logging.ERROR)

        print("Config model:", model_name, vae_model, loras_list)

        self.model.load_pipe(
            model_name,
            task_name=task,
            vae_model=vae_model if vae_model != "None" else None,
            type_model_precision=model_precision,
            retain_task_model_in_cache=retain_task_cache_gui,
        )

        ## BEGIN MOD
        # if textual_inversion and self.model.class_name == "StableDiffusionXLPipeline":
        #     print("No Textual inversion for SDXL")
        ## END MOD

        # ADetailer pass A settings.
        adetailer_params_A = {
            "face_detector_ad" : face_detector_ad_a,
            "person_detector_ad" : person_detector_ad_a,
            "hand_detector_ad" : hand_detector_ad_a,
            "prompt": prompt_ad_a,
            "negative_prompt" : negative_prompt_ad_a,
            "strength" : strength_ad_a,
            # "image_list_task" : None,
            "mask_dilation" : mask_dilation_a,
            "mask_blur" : mask_blur_a,
            "mask_padding" : mask_padding_a,
            "inpaint_only" : adetailer_inpaint_only,
            "sampler" : adetailer_sampler,
        }

        # ADetailer pass B settings (no inpaint_only/sampler overrides).
        adetailer_params_B = {
            "face_detector_ad" : face_detector_ad_b,
            "person_detector_ad" : person_detector_ad_b,
            "hand_detector_ad" : hand_detector_ad_b,
            "prompt": prompt_ad_b,
            "negative_prompt" : negative_prompt_ad_b,
            "strength" : strength_ad_b,
            # "image_list_task" : None,
            "mask_dilation" : mask_dilation_b,
            "mask_blur" : mask_blur_b,
            "mask_padding" : mask_padding_b,
        }
        # Full keyword-argument set handed to the stablepy pipeline call.
        pipe_params = {
            "prompt": prompt,
            "negative_prompt": neg_prompt,
            "img_height": img_height,
            "img_width": img_width,
            "num_images": num_images,
            "num_steps": steps,
            "guidance_scale": cfg,
            "clip_skip": clip_skip,
            "seed": seed,
            "image": image_control,
            "preprocessor_name": preprocessor_name,
            "preprocess_resolution": preprocess_resolution,
            "image_resolution": image_resolution,
            "style_prompt": style_prompt if style_prompt else "",
            "style_json_file": "",
            "image_mask": image_mask, # only for Inpaint
            "strength": strength, # only for Inpaint or ...
            "low_threshold": low_threshold,
            "high_threshold": high_threshold,
            "value_threshold": value_threshold,
            "distance_threshold": distance_threshold,
            "lora_A": lora1 if lora1 != "None" else None,
            "lora_scale_A": lora_scale1,
            "lora_B": lora2 if lora2 != "None" else None,
            "lora_scale_B": lora_scale2,
            "lora_C": lora3 if lora3 != "None" else None,
            "lora_scale_C": lora_scale3,
            "lora_D": lora4 if lora4 != "None" else None,
            "lora_scale_D": lora_scale4,
            "lora_E": lora5 if lora5 != "None" else None,
            "lora_scale_E": lora_scale5,
            ## BEGIN MOD
            "textual_inversion": get_embed_list(self.model.class_name) if textual_inversion else [],
            ## END MOD
            "syntax_weights": syntax_weights, # "Classic"
            "sampler": sampler,
            "xformers_memory_efficient_attention": xformers_memory_efficient_attention,
            "gui_active": True,
            "loop_generation": loop_generation,
            "controlnet_conditioning_scale": float(controlnet_output_scaling_in_unet),
            "control_guidance_start": float(controlnet_start_threshold),
            "control_guidance_end": float(controlnet_stop_threshold),
            "generator_in_cpu": generator_in_cpu,
            "FreeU": freeu,
            "adetailer_A": adetailer_active_a,
            "adetailer_A_params": adetailer_params_A,
            "adetailer_B": adetailer_active_b,
            "adetailer_B_params": adetailer_params_B,
            "leave_progress_bar": leave_progress_bar,
            "disable_progress_bar": disable_progress_bar,
            "image_previews": image_previews,
            "display_images": display_images,
            "save_generated_images": save_generated_images,
            "image_storage_location": image_storage_location,
            "retain_compel_previous_load": retain_compel_previous_load,
            "retain_detailfix_model_previous_load": retain_detailfix_model_previous_load,
            "retain_hires_model_previous_load": retain_hires_model_previous_load,
            "t2i_adapter_preprocessor": t2i_adapter_preprocessor,
            "t2i_adapter_conditioning_scale": float(t2i_adapter_conditioning_scale),
            "t2i_adapter_conditioning_factor": float(t2i_adapter_conditioning_factor),
            "upscaler_model_path": upscaler_model,
            "upscaler_increases_size": upscaler_increases_size,
            "esrgan_tile": esrgan_tile,
            "esrgan_tile_overlap": esrgan_tile_overlap,
            "hires_steps": hires_steps,
            "hires_denoising_strength": hires_denoising_strength,
            "hires_prompt": hires_prompt,
            "hires_negative_prompt": hires_negative_prompt,
            "hires_sampler": hires_sampler,
            "hires_before_adetailer": hires_before_adetailer,
            "hires_after_adetailer": hires_after_adetailer,
            "ip_adapter_image": params_ip_img,
            "ip_adapter_mask": params_ip_msk,
            "ip_adapter_model": params_ip_model,
            "ip_adapter_mode": params_ip_mode,
            "ip_adapter_scale": params_ip_scale,
        }

        # Maybe fix lora issue: 'Cannot copy out of meta tensor; no data!''
        #self.model.pipe.to("cuda:0" if torch.cuda.is_available() else "cpu")

        progress(1, desc="Inference preparation completed. Starting inference...")

        info_state = "" # for yield version
        return self.infer_short(self.model, pipe_params, progress), info_state
## END MOD
689
+
690
+
691
+ from pathlib import Path
692
+ from modutils import (safe_float, escape_lora_basename, to_lora_key, to_lora_path,
693
+ get_local_model_list, get_private_lora_model_lists, get_valid_lora_name,
694
+ get_valid_lora_path, get_valid_lora_wt, get_lora_info,
695
+ normalize_prompt_list, get_civitai_info, search_lora_on_civitai, translate_to_en)
696
+
697
sd_gen = GuiSD()


#@spaces.GPU
def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
          model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
          lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
          sampler = "Euler a", vae = None, translate=True, progress=gr.Progress(track_tqdm=True)):
    """Text-to-image entry point: translates prompts, resolves LoRAs, loads the
    model and runs one generation. Returns the first generated image or None.
    """
    import PIL
    import numpy as np
    MAX_SEED = np.iinfo(np.int32).max

    images: list[tuple[PIL.Image.Image, str | None]] = []
    info: str = ""
    progress(0, desc="Preparing...")

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # BUG FIX: Generator.seed() re-seeds from system entropy and returns that
    # new value, discarding the manual seed; initial_seed() returns the seed
    # just set by manual_seed(), so fixed seeds are reproducible again.
    generator = torch.Generator().manual_seed(seed).initial_seed()

    if translate:
        prompt = translate_to_en(prompt)
        # BUG FIX: previously translated `prompt` again, clobbering the
        # user's negative prompt.
        negative_prompt = translate_to_en(negative_prompt)

    prompt, negative_prompt = insert_model_recom_prompt(prompt, negative_prompt, model_name)
    progress(0.5, desc="Preparing...")
    lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt = \
        set_prompt_loras(prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt)
    lora1 = get_valid_lora_path(lora1)
    lora2 = get_valid_lora_path(lora2)
    lora3 = get_valid_lora_path(lora3)
    lora4 = get_valid_lora_path(lora4)
    lora5 = get_valid_lora_path(lora5)
    progress(1, desc="Preparation completed. Starting inference preparation...")

    # BUG FIX: load_new_model() is a generator — it must be drained, otherwise
    # the model switch never actually executes.
    for _ in sd_gen.load_new_model(model_name, vae, task_model_list[0], progress):
        pass
    images, info = sd_gen.generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
        guidance_scale, True, generator, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
        lora4, lora4_wt, lora5, lora5_wt, sampler,
        height, width, model_name, vae, task_model_list[0], None, "Canny", 512, 1024,
        None, None, None, 0.35, 100, 200, 0.1, 0.1, 1.0, 0., 1., False, "Classic", None,
        1.0, 100, 10, 30, 0.55, "Use same sampler", "", "",
        False, True, 1, True, False, False, False, False, "./images", False, False, False, True, 1, 0.55,
        False, False, False, True, False, "Use same sampler", False, "", "", 0.35, True, True, False, 4, 4, 32,
        False, "", "", 0.35, True, True, False, 4, 4, 32,
        True, None, None, "plus_face", "original", 0.7, None, None, "base", "style", 0.7, progress
    )

    progress(1, desc="Inference completed.")
    output_image = images[0][0] if images else None

    return output_image
748
+
749
+
750
#@spaces.GPU
def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
           model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
           lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
           sampler = "Euler a", vae = None, translate = True, progress=gr.Progress(track_tqdm=True)):
    # Signature twin of infer(); performs no generation, only makes the bound
    # output component visible.
    return gr.update(visible=True)
756
+
757
+
758
# Mark both handlers with the zerogpu attribute — presumably consumed by the
# HF Spaces / ZeroGPU integration; confirm against the `spaces` package.
infer.zerogpu = True
_infer.zerogpu = True
760
+
761
+
762
def pass_result(result):
    """Return *result* unchanged (identity passthrough for event chaining)."""
    output = result
    return output
764
+
765
+
766
def get_samplers():
    """Return the available sampler/scheduler names for the GUI dropdown."""
    return scheduler_names
768
+
769
+
770
def get_vaes():
    """Return the available VAE choices ("None" first) for the GUI dropdown."""
    return vae_model_list
772
+
773
+
774
# Whether get_diffusers_model_list() returns detailed (tupled) entries.
show_diffusers_model_list_detail = False
# Detailed list is computed once at import time and cached.
cached_diffusers_model_tupled_list = get_tupled_model_list(load_diffusers_format_model)
def get_diffusers_model_list():
    """Return model choices: tupled detail entries or plain repo ids."""
    if show_diffusers_model_list_detail:
        return cached_diffusers_model_tupled_list
    else:
        return load_diffusers_format_model
781
+
782
+
783
def enable_diffusers_model_detail(is_enable: bool = False, model_name: str = ""):
    """Toggle detailed model-list display and remap the current selection.

    Falls back to the first model when *model_name* is not in the list.
    """
    global show_diffusers_model_list_detail
    show_diffusers_model_list_detail = is_enable
    try:
        index = load_diffusers_format_model.index(model_name)
    except ValueError:
        index = 0
    if is_enable:
        new_value = cached_diffusers_model_tupled_list[index][1]
    else:
        new_value = load_diffusers_format_model[index]
    return gr.update(value=is_enable), gr.update(value=new_value, choices=get_diffusers_model_list())
795
+
796
+
797
def get_t2i_model_info(repo_id: str):
    """Build a one-line markdown summary for a Hugging Face text-to-image repo.

    Returns "" for invalid/private/gated/non-diffusers repos, otherwise a
    gr.update carrying the markdown string.
    """
    from huggingface_hub import HfApi
    api = HfApi()
    try:
        if " " in repo_id or not api.repo_exists(repo_id):
            return ""
        model = api.model_info(repo_id=repo_id)
    except Exception as e:
        # FIX: the caught exception was bound but never reported.
        print(f"Error: Failed to get {repo_id}'s info. {e}")
        return ""
    if model.private or model.gated:
        return ""
    tags = model.tags
    if 'diffusers' not in tags:
        return ""
    info = []
    url = f"https://huggingface.co/{repo_id}/"
    # Pipeline family, detected from the diffusers pipeline tag.
    if 'diffusers:FluxPipeline' in tags:
        info.append("FLUX.1")
    elif 'diffusers:StableDiffusionXLPipeline' in tags:
        info.append("SDXL")
    elif 'diffusers:StableDiffusionPipeline' in tags:
        info.append("SD1.5")
    if model.card_data and model.card_data.tags:
        # Drop boilerplate tags, keep the descriptive ones.
        info.extend(list_sub(model.card_data.tags, ['text-to-image', 'stable-diffusion', 'stable-diffusion-api', 'safetensors', 'stable-diffusion-xl']))
    info.append(f"DLs: {model.downloads}")
    info.append(f"likes: {model.likes}")
    info.append(model.last_modified.strftime("lastmod: %Y-%m-%d"))
    md = f"Model Info: {', '.join(info)}, [Model Repo]({url})"
    return gr.update(value=md)
824
+
825
+
826
def load_model_prompt_dict():
    """Load per-model recommended-prompt entries from model_dict.json.

    Returns an empty dict when the file is missing or unreadable.
    """
    import json
    # FIX: the local variable previously shadowed the builtin `dict`.
    prompt_dict = {}
    try:
        with open('model_dict.json', encoding='utf-8') as f:
            prompt_dict = json.load(f)
    except Exception:
        pass
    return prompt_dict


model_prompt_dict = load_model_prompt_dict()
838
+
839
+
840
# Toggle for the recommended-prompt injection below.
model_recom_prompt_enabled = True
# Quality/negative tag presets per model family, as token lists.
animagine_ps = to_list("masterpiece, best quality, very aesthetic, absurdres")
animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
pony_ps = to_list("score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
pony_nps = to_list("source_pony, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
other_ps = to_list("anime artwork, anime style, studio anime, highly detailed, cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed")
other_nps = to_list("photo, deformed, black and white, realism, disfigured, low contrast, drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly")
default_ps = to_list("highly detailed, masterpiece, best quality, very aesthetic, absurdres")
default_nps = to_list("score_6, score_5, score_4, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name: str = "None"):
    """Strip known preset tags and merge the model's recommended tags into
    the positive and negative prompts. Returns (prompt, neg_prompt)."""
    if not model_recom_prompt_enabled or not model_name:
        return prompt, neg_prompt
    prompts = to_list(prompt)
    neg_prompts = to_list(neg_prompt)
    # Remove any preset tags already present so they are not duplicated.
    prompts = list_sub(prompts, animagine_ps + pony_ps + other_ps)
    neg_prompts = list_sub(neg_prompts, animagine_nps + pony_nps + other_nps)
    # BUG FIX: the original compared the builtin `type` to "None" (always
    # True); the intent is to check the selected model name.
    last_empty_p = [""] if not prompts and model_name != "None" else []
    last_empty_np = [""] if not neg_prompts and model_name != "None" else []
    ps = []
    nps = []
    if model_name in model_prompt_dict:
        ps = to_list(model_prompt_dict[model_name]["prompt"])
        nps = to_list(model_prompt_dict[model_name]["negative_prompt"])
    else:
        ps = default_ps
        nps = default_nps
    prompts = prompts + ps
    neg_prompts = neg_prompts + nps
    prompt = ", ".join(list_uniq(prompts) + last_empty_p)
    neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
    return prompt, neg_prompt
870
+
871
+
872
def enable_model_recom_prompt(is_enable: bool = True):
    """Set the global recommended-prompt toggle and echo the new value."""
    global model_recom_prompt_enabled
    model_recom_prompt_enabled = is_enable
    return is_enable
876
+
877
+
878
# LoRA metadata for private/local entries, keyed by escaped basename.
# Missing/invalid lora_dict.json is tolerated (empty dict).
private_lora_dict = {}
try:
    with open('lora_dict.json', encoding='utf-8') as f:
        d = json.load(f)
        for k, v in d.items():
            private_lora_dict[escape_lora_basename(k)] = v
except Exception:
    pass


private_lora_model_list = get_private_lora_model_lists()
# Known LoRA info; "None"/"" sentinels map to empty 5-field records.
loras_dict = {"None": ["", "", "", "", ""], "": ["", "", "", "", ""]} | private_lora_dict.copy()
loras_url_to_path_dict = {} # {"URL to download": "local filepath", ...}
civitai_lora_last_results = {} # {"URL to download": {search results}, ...}
all_lora_list = []
893
+
894
+
895
def get_all_lora_list():
    """Refresh the module-level LoRA path cache and return the fresh list."""
    global all_lora_list
    models = get_lora_model_list()
    all_lora_list = models.copy()
    return models
900
+
901
+
902
def get_all_lora_tupled_list():
    """Return (display name, path) pairs for every known LoRA.

    Display names include base-model family and trigger info when metadata
    is cached or retrievable from Civitai.
    """
    global loras_dict
    models = get_all_lora_list()
    if not models:
        return []
    tupled_list = []
    for model in models:
        #if not model: continue # to avoid GUI-related bug
        basename = Path(model).stem
        key = to_lora_key(model)
        # FIX: idiomatic membership / None checks (was `in .keys()` / `!= None`).
        if key in loras_dict:
            items = loras_dict.get(key, None)
        else:
            items = get_civitai_info(model)
            if items is not None:
                loras_dict[key] = items
        name = basename
        value = model
        if items and items[2] != "":
            if items[1] == "Pony":
                name = f"{basename} (for {items[1]}🐴, {items[2]})"
            else:
                name = f"{basename} (for {items[1]}, {items[2]})"
        tupled_list.append((name, value))
    return tupled_list
927
+
928
+
929
def update_lora_dict(path: str):
    """Fetch and cache Civitai metadata for *path* if not cached yet."""
    global loras_dict
    key = to_lora_key(path)
    if key in loras_dict:
        return
    items = get_civitai_info(path)
    # FIX: identity comparison with None (was `items == None`).
    if items is None:
        return
    loras_dict[key] = items
936
+
937
+
938
def download_lora(dl_urls: str):
    """Download comma-separated LoRA URLs into directory_loras.

    Renames each new file to an escaped basename and returns the last new
    local path ("" when nothing new was downloaded).
    """
    global loras_url_to_path_dict
    dl_path = ""
    before = get_local_model_list(directory_loras)
    urls = []
    for url in [url.strip() for url in dl_urls.split(',')]:
        local_path = f"{directory_loras}/{url.split('/')[-1]}"
        if not Path(local_path).exists():
            download_things(directory_loras, url, hf_token, CIVITAI_API_KEY)
            urls.append(url)
    after = get_local_model_list(directory_loras)
    # Files that appeared during the downloads above.
    new_files = list_sub(after, before)
    i = 0
    for file in new_files:
        path = Path(file)
        if path.exists():
            new_path = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
            path.resolve().rename(new_path.resolve())
            # NOTE(review): assumes new_files aligns 1:1 with urls; if a
            # download produced an unexpected extra file this pairing (and
            # urls[i]) can mismatch or raise IndexError — confirm.
            loras_url_to_path_dict[urls[i]] = str(new_path)
            update_lora_dict(str(new_path))
            dl_path = str(new_path)
        i += 1
    return dl_path
961
+
962
+
963
def copy_lora(path: str, new_path: str):
    """Copy a LoRA file to *new_path* and register it in the metadata cache.

    Returns *new_path* on success (or when source and destination are the
    same), None when the source is missing or the copy fails.
    """
    import shutil
    if path == new_path: return new_path
    src = Path(path)
    dst = Path(new_path)
    if not src.exists():
        return None
    try:
        shutil.copy(str(src.resolve()), str(dst.resolve()))
    except Exception:
        return None
    update_lora_dict(str(dst))
    return new_path
977
+
978
+
979
def download_my_lora(dl_urls: str, lora1: str, lora2: str, lora3: str, lora4: str, lora5: str):
    """Download LoRAs from *dl_urls* and place the result in the first free slot.

    Returns gr.update objects refreshing all five LoRA dropdowns.
    """
    path = download_lora(dl_urls)
    if path:
        slots = [lora1, lora2, lora3, lora4, lora5]
        for idx, slot in enumerate(slots):
            if not slot or slot == "None":
                slots[idx] = path
                break
        lora1, lora2, lora3, lora4, lora5 = slots
    choices = get_all_lora_tupled_list()
    return gr.update(value=lora1, choices=choices), gr.update(value=lora2, choices=choices), gr.update(value=lora3, choices=choices),\
        gr.update(value=lora4, choices=choices), gr.update(value=lora5, choices=choices)
995
+
996
+
997
def set_prompt_loras(prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
    """Parse ``<lora:name:weight>`` tags in *prompt* into the five LoRA slots.

    Slot values are validated first; weights embedded in the prompt override
    the slider values. Tags referring to LoRAs not yet in a slot are assigned
    to the first inactive slot. Returns the five (path, weight) pairs.
    """
    import re
    lora1 = get_valid_lora_name(lora1)
    lora2 = get_valid_lora_name(lora2)
    lora3 = get_valid_lora_name(lora3)
    lora4 = get_valid_lora_name(lora4)
    lora5 = get_valid_lora_name(lora5)
    if not "<lora" in prompt: return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
    lora1_wt = get_valid_lora_wt(prompt, lora1, lora1_wt)
    lora2_wt = get_valid_lora_wt(prompt, lora2, lora2_wt)
    lora3_wt = get_valid_lora_wt(prompt, lora3, lora3_wt)
    lora4_wt = get_valid_lora_wt(prompt, lora4, lora4_wt)
    lora5_wt = get_valid_lora_wt(prompt, lora5, lora5_wt)
    # on{N} marks whether slot N already holds an active LoRA
    on1, label1, tag1, md1 = get_lora_info(lora1)
    on2, label2, tag2, md2 = get_lora_info(lora2)
    on3, label3, tag3, md3 = get_lora_info(lora3)
    on4, label4, tag4, md4 = get_lora_info(lora4)
    on5, label5, tag5, md5 = get_lora_info(lora5)
    lora_paths = [lora1, lora2, lora3, lora4, lora5]
    prompts = prompt.split(",") if prompt else []
    for p in prompts:
        p = str(p).strip()
        if "<lora" in p:
            result = re.findall(r'<lora:(.+?):(.+?)>', p)
            if not result: continue
            key = result[0][0]
            wt = result[0][1]
            path = to_lora_path(key)
            if not key in loras_dict.keys() or not path:
                path = get_valid_lora_name(path)
            if not path or path == "None": continue
            if path in lora_paths:
                continue  # already assigned to a slot
            elif not on1:
                lora1 = path
                lora_paths = [lora1, lora2, lora3, lora4, lora5]
                lora1_wt = safe_float(wt)
                on1 = True
            elif not on2:
                lora2 = path
                lora_paths = [lora1, lora2, lora3, lora4, lora5]
                lora2_wt = safe_float(wt)
                on2 = True
            elif not on3:
                lora3 = path
                lora_paths = [lora1, lora2, lora3, lora4, lora5]
                lora3_wt = safe_float(wt)
                on3 = True
            elif not on4:
                lora4 = path
                lora_paths = [lora1, lora2, lora3, lora4, lora5]
                lora4_wt = safe_float(wt)
                on4 = True  # fix: was re-calling get_lora_info(lora4) here, unlike every other slot
            elif not on5:
                lora5 = path
                lora_paths = [lora1, lora2, lora3, lora4, lora5]
                lora5_wt = safe_float(wt)
                on5 = True
    return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
1056
+
1057
+
1058
def apply_lora_prompt(prompt: str, lora_info: str):
    """Merge a LoRA's trigger words into the prompt, deduplicated, keeping a trailing comma."""
    if lora_info == "None": return gr.update(value=prompt)
    base = normalize_prompt_list(prompt.split(",") if prompt else [])
    trigger_src = lora_info.replace("/", ",")
    triggers = normalize_prompt_list(trigger_src.split(",") if str(lora_info) != "None" else [])
    merged = ", ".join(list_uniq(base + triggers) + [""])
    return gr.update(value=merged)
1068
+
1069
+
1070
def update_loras(prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
    """Normalize <lora:...> tags in the prompt and sync the five LoRA slot widgets.

    Prompt tags that refer to a currently selected LoRA are rewritten in a
    canonical form; tags for unknown LoRAs are dropped. Returns gr.update
    objects for the prompt plus, per slot: dropdown, weight, trigger-word
    textbox, a button visibility toggle, and an info-markdown panel.
    """
    import re
    # on{N}: whether slot N holds an active LoRA; label/tag/md are its display metadata
    on1, label1, tag1, md1 = get_lora_info(lora1)
    on2, label2, tag2, md2 = get_lora_info(lora2)
    on3, label3, tag3, md3 = get_lora_info(lora3)
    on4, label4, tag4, md4 = get_lora_info(lora4)
    on5, label5, tag5, md5 = get_lora_info(lora5)
    lora_paths = [lora1, lora2, lora3, lora4, lora5]
    prompts = prompt.split(",") if prompt else []
    output_prompts = []
    for p in prompts:
        p = str(p).strip()
        if "<lora" in p:
            result = re.findall(r'<lora:(.+?):(.+?)>', p)
            if not result: continue
            key = result[0][0]
            wt = result[0][1]
            path = to_lora_path(key)
            # drop tags that do not resolve to a known LoRA file
            if not key in loras_dict.keys() or not path: continue
            if path in lora_paths:
                # canonicalize: escaped key, weight rendered with 2 decimals
                output_prompts.append(f"<lora:{to_lora_key(path)}:{safe_float(wt):.2f}>")
        elif p:
            output_prompts.append(p)
    lora_prompts = []
    if on1: lora_prompts.append(f"<lora:{to_lora_key(lora1)}:{lora1_wt:.2f}>")
    if on2: lora_prompts.append(f"<lora:{to_lora_key(lora2)}:{lora2_wt:.2f}>")
    if on3: lora_prompts.append(f"<lora:{to_lora_key(lora3)}:{lora3_wt:.2f}>")
    if on4: lora_prompts.append(f"<lora:{to_lora_key(lora4)}:{lora4_wt:.2f}>")
    if on5: lora_prompts.append(f"<lora:{to_lora_key(lora5)}:{lora5_wt:.2f}>")
    # trailing "" produces a trailing ", " after joining
    output_prompt = ", ".join(list_uniq(output_prompts + lora_prompts + [""]))
    choices = get_all_lora_tupled_list()
    return gr.update(value=output_prompt), gr.update(value=lora1, choices=choices), gr.update(value=lora1_wt),\
        gr.update(value=tag1, label=label1, visible=on1), gr.update(visible=on1), gr.update(value=md1, visible=on1),\
        gr.update(value=lora2, choices=choices), gr.update(value=lora2_wt),\
        gr.update(value=tag2, label=label2, visible=on2), gr.update(visible=on2), gr.update(value=md2, visible=on2),\
        gr.update(value=lora3, choices=choices), gr.update(value=lora3_wt),\
        gr.update(value=tag3, label=label3, visible=on3), gr.update(visible=on3), gr.update(value=md3, visible=on3),\
        gr.update(value=lora4, choices=choices), gr.update(value=lora4_wt),\
        gr.update(value=tag4, label=label4, visible=on4), gr.update(visible=on4), gr.update(value=md4, visible=on4),\
        gr.update(value=lora5, choices=choices), gr.update(value=lora5_wt),\
        gr.update(value=tag5, label=label5, visible=on5), gr.update(visible=on5), gr.update(value=md5, visible=on5)
1111
+
1112
+
1113
def search_civitai_lora(query, base_model):
    """Search Civitai for LoRAs and populate the result dropdown.

    Caches the raw results in ``civitai_lora_last_results`` keyed by download
    URL. Returns updates for (result dropdown, info markdown, and two
    visibility toggles).
    """
    global civitai_lora_last_results
    items = search_lora_on_civitai(query, base_model)
    if not items: return gr.update(choices=[("", "")], value="", visible=False),\
        gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True)
    civitai_lora_last_results = {}
    choices = []
    for item in items:
        base_model_name = "Pony🐴" if item['base_model'] == "Pony" else item['base_model']
        name = f"{item['name']} (for {base_model_name} / By: {item['creator']} / Tags: {', '.join(item['tags'])})"
        value = item['dl_url']
        choices.append((name, value))
        civitai_lora_last_results[value] = item
    if not choices: return gr.update(choices=[("", "")], value="", visible=False),\
        gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True)
    # fix: default was the *string* "None" (truthy), so a cache miss crashed on result['md']
    result = civitai_lora_last_results.get(choices[0][1])
    md = result['md'] if result else ""
    return gr.update(choices=choices, value=choices[0][1], visible=True), gr.update(value=md, visible=True),\
        gr.update(visible=True), gr.update(visible=True)
1132
+
1133
+
1134
def select_civitai_lora(search_result):
    """Reflect a selected search result into the download URL box and info markdown."""
    if not "http" in search_result: return gr.update(value=""), gr.update(value="None", visible=True)
    # fix: default was the *string* "None" (truthy), so a cache miss crashed on result['md']
    result = civitai_lora_last_results.get(search_result)
    md = result['md'] if result else ""
    return gr.update(value=search_result), gr.update(value=md, visible=True)
1139
+
1140
+
1141
def search_civitai_lora_json(query, base_model):
    """Return raw Civitai search results keyed by download URL, as a JSON component update."""
    found = search_lora_on_civitai(query, base_model)
    results = {item['dl_url']: item for item in found} if found else {}
    return gr.update(value=results)
1148
+
1149
+
1150
# Quality presets: each entry supplies tags appended to the positive/negative
# prompt when the user selects that preset by name.
quality_prompt_list = [
    {
        "name": "None",
        "prompt": "",
        "negative_prompt": "lowres",
    },
    {
        "name": "Animagine Common",
        "prompt": "anime artwork, anime style, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres",
        "negative_prompt": "lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
    },
    {
        "name": "Pony Anime Common",
        "prompt": "source_anime, score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres",
        "negative_prompt": "source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends",
    },
    {
        "name": "Pony Common",
        "prompt": "source_anime, score_9, score_8_up, score_7_up",
        "negative_prompt": "source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends",
    },
    {
        "name": "Animagine Standard v3.0",
        "prompt": "masterpiece, best quality",
        "negative_prompt": "lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name",
    },
    {
        "name": "Animagine Standard v3.1",
        "prompt": "masterpiece, best quality, very aesthetic, absurdres",
        "negative_prompt": "lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
    },
    {
        "name": "Animagine Light v3.1",
        "prompt": "(masterpiece), best quality, very aesthetic, perfect face",
        "negative_prompt": "(low quality, worst quality:1.2), very displeasing, 3d, watermark, signature, ugly, poorly drawn",
    },
    {
        "name": "Animagine Heavy v3.1",
        "prompt": "(masterpiece), (best quality), (ultra-detailed), very aesthetic, illustration, disheveled hair, perfect composition, moist skin, intricate details",
        "negative_prompt": "longbody, lowres, bad anatomy, bad hands, missing fingers, pubic hair, extra digit, fewer digits, cropped, worst quality, low quality, very displeasing",
    },
]
1192
+
1193
+
1194
# Style presets: tags appended to the positive/negative prompt to steer the
# overall rendering style; selected by name via the UI.
style_list = [
    {
        "name": "None",
        "prompt": "",
        "negative_prompt": "",
    },
    {
        "name": "Cinematic",
        "prompt": "cinematic still, emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
        "negative_prompt": "cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
    },
    {
        "name": "Photographic",
        "prompt": "cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed",
        "negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly",
    },
    {
        "name": "Anime",
        "prompt": "anime artwork, anime style, vibrant, studio anime, highly detailed",
        "negative_prompt": "photo, deformed, black and white, realism, disfigured, low contrast",
    },
    {
        "name": "Manga",
        "prompt": "manga style, vibrant, high-energy, detailed, iconic, Japanese comic style",
        "negative_prompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, Western comic style",
    },
    {
        "name": "Digital Art",
        "prompt": "concept art, digital artwork, illustrative, painterly, matte painting, highly detailed",
        "negative_prompt": "photo, photorealistic, realism, ugly",
    },
    {
        "name": "Pixel art",
        "prompt": "pixel-art, low-res, blocky, pixel art style, 8-bit graphics",
        "negative_prompt": "sloppy, messy, blurry, noisy, highly detailed, ultra textured, photo, realistic",
    },
    {
        "name": "Fantasy art",
        "prompt": "ethereal fantasy concept art, magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
        "negative_prompt": "photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white",
    },
    {
        "name": "Neonpunk",
        "prompt": "neonpunk style, cyberpunk, vaporwave, neon, vibes, vibrant, stunningly beautiful, crisp, detailed, sleek, ultramodern, magenta highlights, dark purple shadows, high contrast, cinematic, ultra detailed, intricate, professional",
        "negative_prompt": "painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured",
    },
    {
        "name": "3D Model",
        "prompt": "professional 3d model, octane render, highly detailed, volumetric, dramatic lighting",
        "negative_prompt": "ugly, deformed, noisy, low poly, blurry, painting",
    },
]
1246
+
1247
+
1248
# name -> (prompt, negative_prompt) lookup tables built from the lists above
preset_styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
preset_quality = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in quality_prompt_list}
1250
+
1251
+
1252
def process_style_prompt(prompt: str, neg_prompt: str, styles_key: str = "None", quality_key: str = "None"):
    """Strip known preset tags from the prompts, then append the selected style/quality presets.

    Returns gr.update objects for the (positive, negative) prompt textboxes.
    """
    def to_list(s):
        # fix: the filter previously tested the whole string `s` instead of each
        # element, so empty segments from ",," or trailing commas slipped through
        return [x.strip() for x in s.split(",") if x.strip() != ""]

    def list_sub(a, b):
        return [e for e in a if e not in b]

    def list_uniq(l):
        return sorted(set(l), key=l.index)

    animagine_ps = to_list("anime artwork, anime style, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres")
    animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
    pony_ps = to_list("source_anime, score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
    pony_nps = to_list("source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
    prompts = to_list(prompt)
    neg_prompts = to_list(neg_prompt)

    # Collect every tag any preset could have added, so stale preset tags
    # from a previous selection are removed before applying the new one.
    all_styles_ps = []
    all_styles_nps = []
    for d in style_list:
        all_styles_ps.extend(to_list(str(d.get("prompt", ""))))
        all_styles_nps.extend(to_list(str(d.get("negative_prompt", ""))))

    all_quality_ps = []
    all_quality_nps = []
    for d in quality_prompt_list:
        all_quality_ps.extend(to_list(str(d.get("prompt", ""))))
        all_quality_nps.extend(to_list(str(d.get("negative_prompt", ""))))

    quality_ps = to_list(preset_quality[quality_key][0])
    quality_nps = to_list(preset_quality[quality_key][1])
    styles_ps = to_list(preset_styles[styles_key][0])
    styles_nps = to_list(preset_styles[styles_key][1])

    prompts = list_sub(prompts, animagine_ps + pony_ps + all_styles_ps + all_quality_ps)
    neg_prompts = list_sub(neg_prompts, animagine_nps + pony_nps + all_styles_nps + all_quality_nps)

    # fix: the original compared the *builtin* `type` to strings here
    # ("type != 'None'" is always true; "type == 'Animagine'" could never be
    # true), so the Animagine/Pony append branches were dead code — removed.
    last_empty_p = [""] if not prompts and styles_key != "None" and quality_key != "None" else []
    last_empty_np = [""] if not neg_prompts and styles_key != "None" and quality_key != "None" else []

    prompts = prompts + styles_ps + quality_ps
    neg_prompts = neg_prompts + styles_nps + quality_nps

    prompt = ", ".join(list_uniq(prompts) + last_empty_p)
    neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)

    return gr.update(value=prompt), gr.update(value=neg_prompt)
1306
+
1307
+
1308
+ from PIL import Image
1309
def save_images(images: list[Image.Image], metadatas: list[str]):
    """Save PIL images as PNGs with generation metadata embedded as a tEXt chunk.

    Each image is written to the CWD under a random UUID filename; returns the
    list of absolute file paths. Raises on any save failure.
    """
    from PIL import PngImagePlugin
    import uuid
    try:
        output_images = []
        for image, metadata in zip(images, metadatas):
            info = PngImagePlugin.PngInfo()
            info.add_text("metadata", metadata)
            savefile = f"{str(uuid.uuid4())}.png"
            image.save(savefile, "PNG", pnginfo=info)
            output_images.append(str(Path(savefile).resolve()))
        return output_images
    except Exception as e:
        print(f"Failed to save image file: {e}")
        # fix: message was an f-string with no placeholder and a dangling colon
        raise Exception("Failed to save image file") from e
env.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

# API credentials pulled from the environment (None when unset).
CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
hf_token = os.environ.get("HF_TOKEN")
hf_read_token = os.environ.get('HF_READ_TOKEN') # only use for private repo

# - **List Models**
# Diffusers-format checkpoints offered in the model dropdown.
load_diffusers_format_model = [
    'votepurchase/animagine-xl-3.1',
    'votepurchase/NSFW-GEN-ANIME-v2',
    'votepurchase/kivotos-xl-2.0',
    'votepurchase/holodayo-xl-2.1',
    'votepurchase/ponyDiffusionV6XL',
    'votepurchase/AnythingXL_xl',
    'votepurchase/7thAnimeXLPonyA_v10',
    'votepurchase/ChilloutMix',
    'votepurchase/NovelAIRemix',
    'votepurchase/NSFW-gen-v2',
    'votepurchase/PerfectDeliberate-Anime_v2',
    'votepurchase/realpony-xl',
    'votepurchase/artiwaifu-diffusion-1.0',
    'votepurchase/Starry-XL-v5.2',
    'votepurchase/Yaki-Dofu-Mix',
    'votepurchase/ebara-pony-v1-sdxl',
    'votepurchase/waiANIMIXPONYXL_v10',
    'votepurchase/counterfeitV30_v30',
    'votepurchase/ebara-pony',
    'votepurchase/Realistic_Vision_V1.4',
    'votepurchase/pony',
    'votepurchase/ponymatureSDXL_ponyeclipse10',
    'votepurchase/waiREALMIX_v70',
    'votepurchase/waiREALCN_v10',
    'votepurchase/PVCStyleModelMovable_pony151',
    'votepurchase/PVCStyleModelMovable_beta27Realistic',
    'votepurchase/PVCStyleModelFantasy_beta12',
    'votepurchase/pvcxl-v1-lora',
    'votepurchase/Realistic_Vision_V2.0',
    'votepurchase/RealVisXL_V4.0',
    'votepurchase/juggernautXL_hyper_8step_sfw',
    'votepurchase/ponyRealism_v21MainVAE',
    'stabilityai/stable-diffusion-xl-base-1.0',
    'cagliostrolab/animagine-xl-3.1',
    'misri/epicrealismXL_v7FinalDestination',
    'misri/juggernautXL_juggernautX',
    'misri/zavychromaxl_v80',
    'SG161222/RealVisXL_V4.0',
    'misri/newrealityxlAllInOne_Newreality40',
    'eienmojiki/Anything-XL',
    'eienmojiki/Starry-XL-v5.2',
    'gsdf/CounterfeitXL',
    'kitty7779/ponyDiffusionV6XL',
    'yodayo-ai/clandestine-xl-1.0',
    'yodayo-ai/kivotos-xl-2.0',
    'yodayo-ai/holodayo-xl-2.1',
    'digiplay/majicMIX_sombre_v2',
    'digiplay/majicMIX_realistic_v6',
    'digiplay/majicMIX_realistic_v7',
    'digiplay/DreamShaper_8',
    'digiplay/BeautifulArt_v1',
    'digiplay/DarkSushi2.5D_v1',
    'digiplay/darkphoenix3D_v1.1',
    'digiplay/BeenYouLiteL11_diffusers',
    'rubbrband/revAnimated_v2Rebirth',
    'youknownothing/cyberrealistic_v50',
    'votepurchase/counterfeitV30_v30',  # NOTE(review): duplicate of an earlier entry — confirm intentional
    'Meina/MeinaMix_V11',
    'Meina/MeinaUnreal_V5',
    'Meina/MeinaPastel_V7',
    'rubbrband/realcartoon3d_v16',
    'rubbrband/realcartoonRealistic_v14',
    'KBlueLeaf/Kohaku-XL-Epsilon-rev2',
    'KBlueLeaf/Kohaku-XL-Epsilon-rev3',
    'KBlueLeaf/Kohaku-XL-Zeta',
    'kayfahaarukku/UrangDiffusion-1.4',
    'Eugeoter/artiwaifu-diffusion-2.0',
    'Raelina/Rae-Diffusion-XL-V2',
    'Raelina/Raemu-XL-V4',
]

# List all Models for specified user
HF_MODEL_USER_LIKES = ["votepurchase"] # sorted by number of likes
HF_MODEL_USER_EX = ["John6666"] # sorted by a special rule


# - **Download Models**
download_model_list = [
]

# - **Download VAEs**
download_vae_list = [
    'https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/resolve/main/sdxl.vae.safetensors?download=true',
    'https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/resolve/main/sdxl_vae-fp16fix-c-1.1-b-0.5.safetensors?download=true',
    "https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/blob/main/sdxl_vae-fp16fix-blessed.safetensors",
    "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt",
    "https://huggingface.co/stabilityai/sd-vae-ft-ema-original/resolve/main/vae-ft-ema-560000-ema-pruned.ckpt",
]

# - **Download LoRAs**
download_lora_list = [
]

# Download Embeddings
download_embeds = [
    'https://huggingface.co/datasets/Nerfgun3/bad_prompt/blob/main/bad_prompt_version2.pt',
    'https://huggingface.co/embed/negative/resolve/main/EasyNegativeV2.safetensors',
    'https://huggingface.co/embed/negative/resolve/main/bad-hands-5.pt',
]

# Local storage directories, created on import.
# NOTE(review): "embedings" spelling is kept as-is — these strings are paths
# referenced elsewhere; renaming would break existing deployments.
directory_models = 'models'
os.makedirs(directory_models, exist_ok=True)
directory_loras = 'loras'
os.makedirs(directory_loras, exist_ok=True)
directory_vaes = 'vaes'
os.makedirs(directory_vaes, exist_ok=True)
directory_embeds = 'embedings'
os.makedirs(directory_embeds, exist_ok=True)

directory_embeds_sdxl = 'embedings_xl'
os.makedirs(directory_embeds_sdxl, exist_ok=True)
directory_embeds_positive_sdxl = 'embedings_xl/positive'
os.makedirs(directory_embeds_positive_sdxl, exist_ok=True)

# Private HF repos holding LoRAs/VAEs/embeddings (require hf_read_token).
HF_LORA_PRIVATE_REPOS1 = ['John6666/loratest1', 'John6666/loratest3', 'John6666/loratest4', 'John6666/loratest6']
HF_LORA_PRIVATE_REPOS2 = ['John6666/loratest10', 'John6666/loratest11','John6666/loratest'] # to be sorted as 1 repo
HF_LORA_PRIVATE_REPOS = HF_LORA_PRIVATE_REPOS1 + HF_LORA_PRIVATE_REPOS2
HF_LORA_ESSENTIAL_PRIVATE_REPO = 'John6666/loratest1' # to be downloaded on run app
HF_VAE_PRIVATE_REPO = 'John6666/vaetest'
HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO = 'John6666/embeddingstest'
HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO = 'John6666/embeddingspositivetest'
ja_to_danbooru/character_series_dict.json ADDED
The diff for this file is too large to render. See raw diff
 
ja_to_danbooru/danbooru_tagtype_dict.json ADDED
The diff for this file is too large to render. See raw diff
 
ja_to_danbooru/ja_danbooru_dict.json ADDED
The diff for this file is too large to render. See raw diff
 
ja_to_danbooru/ja_to_danbooru.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import re
3
+ from pathlib import Path
4
+
5
+
6
def load_json_dict(path: str):
    """Load a JSON object from *path*; return {} when missing or unreadable."""
    import json
    from pathlib import Path
    data = {}  # fix: was named `dict`, shadowing the builtin
    if not Path(path).exists(): return data
    try:
        with open(path, encoding='utf-8') as f:
            data = json.load(f)
    except Exception:
        print(f"Failed to open dictionary file: {path}")
        return data
    return data
18
+
19
+
20
# Tag-translation lookup tables (empty dicts when the JSON files are absent).
# NOTE(review): paths are relative to the CWD, not this module's directory — confirm callers chdir appropriately.
ja_danbooru_dict = load_json_dict('ja_danbooru_dict.json')  # Japanese tag -> Danbooru tag
char_series_dict = load_json_dict('character_series_dict.json')  # character tag -> series tag
tagtype_dict = load_json_dict('danbooru_tagtype_dict.json')  # Danbooru tag -> tag type (e.g. "character")
23
+
24
+
25
def jatags_to_danbooru_tags(jatags: list[str]):
    """Fuzzy-translate Japanese tags to Danbooru tags via rapidfuzz.

    For each input tag, the closest dictionary key (score >= 90) is mapped to
    its Danbooru tag; character tags additionally pull in their series tag
    (score >= 95). Unmatched inputs are silently dropped.
    """
    from rapidfuzz.process import extractOne
    from rapidfuzz.utils import default_process
    keys = list(ja_danbooru_dict.keys())
    ckeys = list(char_series_dict.keys())
    tags = []
    for jatag in jatags:
        jatag = str(jatag).strip()
        s = default_process(str(jatag))
        e1 = extractOne(s, keys, processor=default_process, score_cutoff=90.0)
        if e1:
            tag = str(ja_danbooru_dict[e1[0]])
            tags.append(tag)
            # character tags also get their series appended when one is known
            if tag in tagtype_dict.keys() and tagtype_dict[tag] == "character":
                cs = default_process(tag)
                ce1 = extractOne(cs, ckeys, processor=default_process, score_cutoff=95.0)
                if ce1:
                    series = str(char_series_dict[ce1[0]])
                    tags.append(series)
    return tags
45
+
46
+
47
def jatags_to_danbooru(input_tag, input_file, output_file, is_append):
    """Convert Japanese tags (from a string or file) to Danbooru tags.

    When *input_file* exists its contents replace *input_tag*. The result is
    written to *output_file* when given, otherwise printed; it is always
    returned as a comma-joined string. With *is_append*, the original tags
    are kept in front of the converted ones.
    """
    if input_file and Path(input_file).exists():
        try:
            with open(input_file, 'r', encoding='utf-8') as f:
                input_tag = f.read()
        except Exception:
            print(f"Failed to open input file: {input_file}")
    ja_tags = [t.strip() for t in input_tag.split(",")] if input_tag else []
    converted = jatags_to_danbooru_tags(ja_tags)
    result_tags = (ja_tags + converted) if is_append else converted
    output_tag = ", ".join(result_tags)
    if not output_file:
        print(output_tag)
        return output_tag
    try:
        with open(output_file, mode='w', encoding="utf-8") as f:
            f.write(output_tag)
    except Exception:
        print(f"Failed to write output file: {output_file}")
    return output_tag
67
+
68
+
69
if __name__ == "__main__":
    # CLI entry point: translate tags given via --tags or --file.
    parser = argparse.ArgumentParser()
    parser.add_argument("--tags", default=None, type=str, required=False, help="Input tags.")
    parser.add_argument("--file", default=None, type=str, required=False, help="Input tags from a text file.")
    parser.add_argument("--out", default=None, type=str, help="Output to text file.")
    parser.add_argument("--append", default=False, type=bool, help="Whether the output contains the input tags or not.")

    args = parser.parse_args()
    # At least one input source is required.
    assert (args.tags, args.file) != (None, None), "Must provide --tags or --file!"

    jatags_to_danbooru(args.tags, args.file, args.out, args.append)
80
+
81
+
82
+ # Usage:
83
+ # python ja_to_danbooru.py --tags "女の子, 大室櫻子"
84
# python ja_to_danbooru.py --file inputtag.txt
# python ja_to_danbooru.py --file inputtag.txt --append True
86
+ # Datasets: https://huggingface.co/datasets/p1atdev/danbooru-ja-tag-pair-20240715
87
+ # Datasets: https://github.com/ponapon280/danbooru-e621-converter
llmdolphin.py ADDED
@@ -0,0 +1,1089 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import spaces
2
+ import gradio as gr
3
+ from llama_cpp import Llama
4
+ from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
5
+ from llama_cpp_agent.providers import LlamaCppPythonProvider
6
+ from llama_cpp_agent.chat_history import BasicChatHistory
7
+ from llama_cpp_agent.chat_history.messages import Roles
8
+ from ja_to_danbooru.ja_to_danbooru import jatags_to_danbooru_tags
9
+ import wrapt_timeout_decorator
10
+
11
+
12
+ llm_models_dir = "./llm_models"
13
+ llm_models = {
14
+ #"": ["", MessagesFormatterType.LLAMA_3],
15
+ #"": ["", MessagesFormatterType.MISTRAL],
16
+ #"": ["", MessagesFormatterType.ALPACA],
17
+ #"": ["", MessagesFormatterType.OPEN_CHAT],
18
+ #"": ["", MessagesFormatterType.CHATML],
19
+ #"": ["", MessagesFormatterType.PHI_3],
20
+ "mn-12b-lyra-v2a1-q5_k_m.gguf": ["HalleyStarbun/MN-12B-Lyra-v2a1-Q5_K_M-GGUF", MessagesFormatterType.CHATML],
21
+ "L3-8B-Tamamo-v1.i1-Q5_K_M.gguf": ["mradermacher/L3-8B-Tamamo-v1-i1-GGUF", MessagesFormatterType.LLAMA_3],
22
+ "Instant-RP-Noodles-12B-v1.3.Q4_K_M.gguf": ["mradermacher/Instant-RP-Noodles-12B-v1.3-GGUF", MessagesFormatterType.MISTRAL],
23
+ "MN-12B-Lyra-v4-Q4_K_M.gguf": ["bartowski/MN-12B-Lyra-v4-GGUF", MessagesFormatterType.CHATML],
24
+ "Lyra4-Gutenberg-12B.Q4_K_M.gguf": ["mradermacher/Lyra4-Gutenberg-12B-GGUF", MessagesFormatterType.CHATML],
25
+ "Llama-3.1-8B-EZO-1.1-it.Q5_K_M.gguf": ["mradermacher/Llama-3.1-8B-EZO-1.1-it-GGUF", MessagesFormatterType.MISTRAL],
26
+ "MN-12B-Starcannon-v1.i1-Q4_K_M.gguf": ["mradermacher/MN-12B-Starcannon-v1-i1-GGUF", MessagesFormatterType.MISTRAL],
27
+ "MN-12B-Starcannon-v2.i1-Q4_K_M.gguf": ["mradermacher/MN-12B-Starcannon-v2-i1-GGUF", MessagesFormatterType.CHATML],
28
+ "MN-12B-Starcannon-v3.i1-Q4_K_M.gguf": ["mradermacher/MN-12B-Starcannon-v3-i1-GGUF", MessagesFormatterType.CHATML],
29
+ "MN-12B-Starcannon-v4-unofficial.i1-Q4_K_M.gguf": ["mradermacher/MN-12B-Starcannon-v4-unofficial-i1-GGUF", MessagesFormatterType.MISTRAL],
30
+ "MN-12B-Starsong-v1.i1-Q4_K_M.gguf": ["mradermacher/MN-12B-Starsong-v1-i1-GGUF", MessagesFormatterType.CHATML],
31
+ "StarDust-12b-v1-Q4_K_M.gguf": ["Luni/StarDust-12b-v1-GGUF", MessagesFormatterType.MISTRAL],
32
+ "Lumimaid-Magnum-12B.i1-Q4_K_M.gguf": ["mradermacher/Lumimaid-Magnum-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
33
+ "Nemo-12B-Marlin-v1.i1-Q4_K_M.gguf": ["mradermacher/Nemo-12B-Marlin-v1-i1-GGUF", MessagesFormatterType.MISTRAL],
34
+ "Nemo-12B-Marlin-v2.i1-Q4_K_M.gguf": ["mradermacher/Nemo-12B-Marlin-v2-i1-GGUF", MessagesFormatterType.MISTRAL],
35
+ "Nemo-12B-Marlin-v3.Q4_K_M.gguf": ["mradermacher/Nemo-12B-Marlin-v3-GGUF", MessagesFormatterType.MISTRAL],
36
+ "Nemo-12B-Marlin-v4.i1-Q4_K_M.gguf": ["mradermacher/Nemo-12B-Marlin-v4-i1-GGUF", MessagesFormatterType.MISTRAL],
37
+ "Nemo-12B-Marlin-v5-Q4_K_M.gguf": ["starble-dev/Nemo-12B-Marlin-v5-GGUF", MessagesFormatterType.CHATML],
38
+ "Nemo-12B-Marlin-v7.Q4_K_M.gguf": ["mradermacher/Nemo-12B-Marlin-v7-GGUF", MessagesFormatterType.MISTRAL],
39
+ "Nemo-12B-Marlin-v8.Q4_K_S.gguf": ["mradermacher/Nemo-12B-Marlin-v8-GGUF", MessagesFormatterType.MISTRAL],
40
+ "NemoDori-v0.2-Upscaled.1-14B.Q4_K_M.gguf": ["mradermacher/NemoDori-v0.2-Upscaled.1-14B-GGUF", MessagesFormatterType.MISTRAL],
41
+ "Fireball-12B-v1.0.i1-Q4_K_M.gguf": ["mradermacher/Fireball-12B-v1.0-i1-GGUF", MessagesFormatterType.MISTRAL],
42
+ "Fireball-Mistral-Nemo-Base-2407-sft-v2.2a.Q4_K_M.gguf": ["mradermacher/Fireball-Mistral-Nemo-Base-2407-sft-v2.2a-GGUF", MessagesFormatterType.MISTRAL],
43
+ "T-III-12B.Q4_K_M.gguf": ["mradermacher/T-III-12B-GGUF", MessagesFormatterType.CHATML],
44
+ "T-IIIa-12B.Q4_K_S.gguf": ["mradermacher/T-IIIa-12B-GGUF", MessagesFormatterType.MISTRAL],
45
+ "Type-IV-Last-12B.Q4_K_M.gguf": ["mradermacher/Type-IV-Last-12B-GGUF", MessagesFormatterType.CHATML],
46
+ "StorieCreative.i1-Q4_K_S.gguf": ["mradermacher/StorieCreative-i1-GGUF", MessagesFormatterType.MISTRAL],
47
+ "Deutscher-Pantheon-12B.Q4_K_M.gguf": ["mradermacher/Deutscher-Pantheon-12B-GGUF", MessagesFormatterType.MISTRAL],
48
+ "guns-and-roses-r1-Q4_K_L-imat.gguf": ["Reiterate3680/guns-and-roses-r1-GGUF", MessagesFormatterType.MISTRAL],
49
+ "MagnumChronos.Q4_K_M.gguf": ["mradermacher/MagnumChronos-GGUF", MessagesFormatterType.CHATML],
50
+ "StarDust-12b-v2.i1-Q5_K_M.gguf": ["mradermacher/StarDust-12b-v2-i1-GGUF", MessagesFormatterType.CHATML],
51
+ "Rocinante-12B-v2c-Q4_K_M.gguf": ["TheDrummer/UnslopNemo-v1-GGUF", MessagesFormatterType.MISTRAL],
52
+ "mn-maghin-12b-q6_k.gguf": ["rityak/MN-Maghin-12B-Q6_K-GGUF", MessagesFormatterType.MISTRAL],
53
+ "Trinas_Nectar-8B-model_stock.i1-Q4_K_M.gguf": ["mradermacher/Trinas_Nectar-8B-model_stock-i1-GGUF", MessagesFormatterType.MISTRAL],
54
+ "ChatWaifu_v1.4.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.4-GGUF", MessagesFormatterType.MISTRAL],
55
+ "ChatWaifu_v1.3.1.Q4_K_M.gguf": ["mradermacher/ChatWaifu_v1.3.1-GGUF", MessagesFormatterType.MISTRAL],
56
+ "L3.1-Vulca-Epith-Bluegrade-v0.2-8B.q8_0.gguf": ["kromquant/L3.1-Vulca-Epith-Bluegrade-v0.2-8B-GGUFs", MessagesFormatterType.LLAMA_3],
57
+ "llama-3.1-8b-omnimatrix-iq4_nl-imat.gguf": ["bunnycore/Llama-3.1-8B-OmniMatrix-IQ4_NL-GGUF", MessagesFormatterType.LLAMA_3],
58
+ "L3.1-Artemis-d-8B.i1-Q5_K_M.gguf": ["mradermacher/L3.1-Artemis-d-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
59
+ "G2-9B-Blackout-R1-Q4_K_M.gguf": ["bartowski/G2-9B-Blackout-R1-GGUF", MessagesFormatterType.ALPACA],
60
+ "FireStorm-Llama-3.1-8B.i1-Q5_K_M.gguf": ["mradermacher/FireStorm-Llama-3.1-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
61
+ "gemma-2-ifable-9b-q6_k.gguf": ["nengo/gemma-2-Ifable-9B-Q6_K-GGUF", MessagesFormatterType.ALPACA],
62
+ "llama-3.1-8b-hypernova-abliteration-q5_k_m.gguf": ["bunnycore/LLama-3.1-8B-HyperNova-abliteration-Q5_K_M-GGUF", MessagesFormatterType.LLAMA_3],
63
+ "llama-3.1-8b-hypernova-q4_k_m.gguf": ["bunnycore/LLama-3.1-8B-HyperNova-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
64
+ "Llama-3.1-SuperNova-Lite-lorabilterated-8B.i1-Q4_K_S.gguf": ["mradermacher/Llama-3.1-SuperNova-Lite-lorabilterated-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
65
+ "book-gut12b-q4_k_m.gguf": ["ClaudioItaly/Book-Gut12B-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
66
+ "chocolatine-14b-instruct-dpo-v1.2-q4_k_m.gguf": ["jpacifico/Chocolatine-14B-Instruct-DPO-v1.2-Q4_K_M-GGUF", MessagesFormatterType.PHI_3],
67
+ "Nymph_8B.Q4_K_M.gguf": ["QuantFactory/Nymph_8B-GGUF", MessagesFormatterType.LLAMA_3],
68
+ "l3.1-8b-rp-test-004-slerp-q8_0.gguf": ["v000000/L3.1-Storniitova-8B-Q8_0-GGUF", MessagesFormatterType.MISTRAL],
69
+ "white_dwarf-q4_k_m.gguf": ["Daemontatox/White_dwarf-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
70
+ "Gemma2-9b-Max.i1-Q4_K_M.gguf": ["mradermacher/Gemma2-9b-Max-i1-GGUF", MessagesFormatterType.ALPACA],
71
+ "Evolutions-Reflex.Q4_K_M.gguf": ["mradermacher/Evolutions-Reflex-GGUF", MessagesFormatterType.MISTRAL],
72
+ "IceEspressoRPv6.Q5_K_M.gguf": ["mradermacher/IceEspressoRPv6-GGUF", MessagesFormatterType.MISTRAL],
73
+ "Heart_Stolen-ALT-8B-Model_Stock.Q5_K_M.gguf": ["mradermacher/Heart_Stolen-ALT-8B-Model_Stock-GGUF", MessagesFormatterType.LLAMA_3],
74
+ "Pulzar-DPO-7.Q5_K_M.gguf": ["mradermacher/Pulzar-DPO-7-GGUF", MessagesFormatterType.MISTRAL],
75
+ "HermesNova-Llama-3.1-8B.Q4_K_M.gguf": ["mradermacher/HermesNova-Llama-3.1-8B-GGUF", MessagesFormatterType.LLAMA_3],
76
+ "IceEspressoRPv2-7b.Q5_K_M.gguf": ["mradermacher/IceEspressoRPv2-7b-GGUF", MessagesFormatterType.MISTRAL],
77
+ "gemma-2-9B-it-advanced-v2.1.Q4_K_M.gguf": ["RichardErkhov/jsgreenawalt_-_gemma-2-9B-it-advanced-v2.1-gguf", MessagesFormatterType.ALPACA],
78
+ "Gemma-9B-ColdBrew-Testing.Q4_K_M.gguf": ["mradermacher/Gemma-9B-ColdBrew-Testing-GGUF", MessagesFormatterType.ALPACA],
79
+ "opencrystal-12b-l3-v4-beta-q5_k_m.gguf": ["Darkknight535/OpenCrystal-12B-L3-v4-Beta-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
80
+ "HyperLLama3.1-8b-Nova.i1-Q4_K_M.gguf": ["mradermacher/HyperLLama3.1-8b-Nova-i1-GGUF", MessagesFormatterType.LLAMA_3],
81
+ "L3.1-Boshima-b-FIX.Q5_K_M.gguf": ["mradermacher/L3.1-Boshima-b-FIX-GGUF", MessagesFormatterType.LLAMA_3],
82
+ "Maxtopia-13B.i1-Q4_K_M.gguf": ["mradermacher/Maxtopia-13B-i1-GGUF", MessagesFormatterType.ALPACA],
83
+ "Instant-RP-Noodles-12B-v1.4.Q4_K_M.gguf": ["mradermacher/Instant-RP-Noodles-12B-v1.4-GGUF", MessagesFormatterType.MISTRAL],
84
+ "HyperGemma-2-9B.i1-Q4_K_M.gguf": ["mradermacher/HyperGemma-2-9B-i1-GGUF", MessagesFormatterType.ALPACA],
85
+ "reflex-gutenberg-dpo-q5_k_m.gguf": ["ClaudioItaly/Reflex-Gutenberg-DPO-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
86
+ "magnum-picaro-0.7-v2-12b-q5_k_m.gguf": ["Trappu/Magnum-Picaro-0.7-v2-12b-Q5_K_M-GGUF", MessagesFormatterType.CHATML],
87
+ "Llama-3.1-SuperNova-Lite-Q5_K_M.gguf": ["bartowski/Llama-3.1-SuperNova-Lite-GGUF", MessagesFormatterType.LLAMA_3],
88
+ "Fireball-Alpaca-Llama3.1.06-8B-Philos.i1-Q5_K_M.gguf": ["mradermacher/Fireball-Alpaca-Llama3.1.06-8B-Philos-i1-GGUF", MessagesFormatterType.CHATML],
89
+ "fimbmoister-11range-q4_k_m.gguf": ["ClaudioItaly/FimbMoister-11Range-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
90
+ "Magot-v1-Gemma2-8k-9B.Q4_K_M.gguf": ["mradermacher/Magot-v1-Gemma2-8k-9B-GGUF", MessagesFormatterType.LLAMA_3],
91
+ "L3.1-Boshima-b.Q5_K_M.gguf": ["mradermacher/L3.1-Boshima-b-GGUF", MessagesFormatterType.LLAMA_3],
92
+ "L3.1-Vulca-Epith-Humbeta-v0.2-8B.i1-Q5_K_M.gguf": ["mradermacher/L3.1-Vulca-Epith-Humbeta-v0.2-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
93
+ "l3.1-forsths-q5_k_m.gguf": ["djuna/L3.1-ForStHS-Q5_K_M-GGUF", MessagesFormatterType.LLAMA_3],
94
+ "l3-umbral-storm-8b-t0.0001-q8_0.gguf": ["v000000/L3-Umbral-Storm-8B-t0.0001-Q8_0-GGUF", MessagesFormatterType.LLAMA_3],
95
+ "IceSakeRP-7b.i1-Q5_K_M.gguf": ["mradermacher/IceSakeRP-7b-i1-GGUF", MessagesFormatterType.ALPACA],
96
+ "llama-3.1-8b-ultra-max-pro-q5_k_m.gguf": ["bunnycore/LLama-3.1-8b-Ultra-Max-Pro-Q5_K_M-GGUF", MessagesFormatterType.LLAMA_3],
97
+ "maxtopia-13b-q4_k_m.gguf": ["ClaudioItaly/Maxtopia-13B-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
98
+ "utopia-range-q4_k_m.gguf": ["ClaudioItaly/Utopia-Range-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
99
+ "L3-Dark-Planet-8B-V2-EOOP-D_AU-Q4_k_m.gguf": ["DavidAU/L3-Dark-Planet-8B-V2-Eight-Orbs-Of-Power-GGUF", MessagesFormatterType.LLAMA_3],
100
+ "mergekit-model_stock-azjsvkh-q4_k_m.gguf": ["DreadPoor/Heart_Stolen-8B-Model_Stock-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
101
+ "Pantheon-RP-1.6.1-12b-Nemo-Q4_K_M.gguf": ["bartowski/Pantheon-RP-1.6.1-12b-Nemo-GGUF", MessagesFormatterType.MISTRAL],
102
+ "mistral-nemo-12b-arliai-rpmax-v1.1-q5_k_m.gguf": ["NikolayKozloff/Mistral-Nemo-12B-ArliAI-RPMax-v1.1-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
103
+ "ggml-model-Q5_K.gguf": ["jeiku/Luna_7B_GGUF", MessagesFormatterType.MISTRAL],
104
+ "Daredevil-8B-abliterated-dpomix.i1-Q5_K_M.gguf": ["mradermacher/Daredevil-8B-abliterated-dpomix-i1-GGUF", MessagesFormatterType.LLAMA_3],
105
+ "MN-12B-Chronos-Gold-Celeste-v1.i1-Q4_K_M.gguf": ["mradermacher/MN-12B-Chronos-Gold-Celeste-v1-i1-GGUF", MessagesFormatterType.MISTRAL],
106
+ "MN-HaremUnion-12B.i1-Q4_K_M.gguf": ["mradermacher/MN-HaremUnion-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
107
+ "Evocation.i1-Q4_K_M.gguf": ["mradermacher/Evocation-i1-GGUF", MessagesFormatterType.ALPACA],
108
+ "gemma-2-9b-it-WPO-HB-Q4_K_M.gguf": ["bartowski/gemma-2-9b-it-WPO-HB-GGUF", MessagesFormatterType.ALPACA],
109
+ "MN-12B-Lyra-v4.Q4_K_M.gguf": ["QuantFactory/MN-12B-Lyra-v4-GGUF", MessagesFormatterType.CHATML],
110
+ "Stella-mistral-nemo-12B-v2.i1-Q4_K_M.gguf": ["mradermacher/Stella-mistral-nemo-12B-v2-i1-GGUF", MessagesFormatterType.MISTRAL],
111
+ "Artificium-llama3.1-8B-001.i1-Q5_K_M.gguf": ["mradermacher/Artificium-llama3.1-8B-001-i1-GGUF", MessagesFormatterType.MISTRAL],
112
+ "Sensibilitystory-7B.i1-Q5_K_M.gguf": ["mradermacher/Sensibilitystory-7B-i1-GGUF", MessagesFormatterType.MISTRAL],
113
+ "ataraxy-actually-9b.i1-Q4_K_M.gguf": ["mradermacher/ataraxy-actually-9b-i1-GGUF", MessagesFormatterType.ALPACA],
114
+ "xortron7methedup-slerp-8b-q5_k_m.gguf": ["darkc0de/Xortron7MethedUp-SLERP-8B-Q5_K_M-GGUF", MessagesFormatterType.LLAMA_3],
115
+ "magnaraxy-9b.i1-Q4_K_M.gguf": ["mradermacher/magnaraxy-9b-i1-GGUF", MessagesFormatterType.ALPACA],
116
+ "L3.1-HermesBreakGradientXL-12B.Q4_K_M.gguf": ["mradermacher/L3.1-HermesBreakGradientXL-12B-GGUF", MessagesFormatterType.LLAMA_3],
117
+ "nemo-picaro-v0.1-12b-q4_k_s.gguf": ["Trappu/Nemo-Picaro-v0.1-12B-Q4_K_S-GGUF", MessagesFormatterType.CHATML],
118
+ "magnum-picaro-0.7-12b-q5_k_m.gguf": ["Trappu/Magnum-Picaro-0.7-12b-Q5_K_M-GGUF", MessagesFormatterType.CHATML],
119
+ "gemma-2-gemmama-9b-q5_k_m.gguf": ["djuna/Gemma-2-gemmama-9b-Q5_K_M-GGUF", MessagesFormatterType.ALPACA],
120
+ "mn-haremunion-12b-q6_k.gguf": ["rityak/MN-HaremUnion-12B-Q6_K-GGUF", MessagesFormatterType.MISTRAL],
121
+ "magnum-v3-9b-chatml-Q4_K_M.gguf": ["bartowski/magnum-v3-9b-chatml-GGUF", MessagesFormatterType.ALPACA],
122
+ "stella-mistral-nemo-12b-v2-q5_k_m.gguf": ["Triangle104/Stella-mistral-nemo-12B-v2-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
123
+ "magnaraxy-9b-Q4_K_M.gguf": ["bartowski/magnaraxy-9b-GGUF", MessagesFormatterType.ALPACA],
124
+ "magnum-v3-9b-customgemma2-Q4_K_M.gguf": ["bartowski/magnum-v3-9b-customgemma2-GGUF", MessagesFormatterType.ALPACA],
125
+ "mn-12b-lyra-v4a1-q4_k_m.gguf": ["404ProfileUnknown/MN-12B-Lyra-v4a1-Q4_K_M-GGUF", MessagesFormatterType.CHATML],
126
+ "nemored_1-q4_k_m.gguf": ["Daemontatox/nemored_1-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
127
+ "L3.1-BlueSkyy-8B.Q4_K_S.gguf": ["mradermacher/L3.1-BlueSkyy-8B-GGUF", MessagesFormatterType.LLAMA_3],
128
+ "EvolutionStar.Q5_K_M.gguf": ["mradermacher/EvolutionStar-GGUF", MessagesFormatterType.MISTRAL],
129
+ "Xortron7_Alpha.Q5_K_M.gguf": ["mradermacher/Xortron7_Alpha-GGUF", MessagesFormatterType.LLAMA_3],
130
+ "L3.1-DarkStock-8B.Q4_K_S.gguf": ["mradermacher/L3.1-DarkStock-8B-GGUF", MessagesFormatterType.LLAMA_3],
131
+ "L3.1-IFEvalStock-8B.Q5_K_M.gguf": ["mradermacher/L3.1-IFEvalStock-8B-GGUF", MessagesFormatterType.LLAMA_3],
132
+ "L3-SunnySkyy-8B.Q5_K_M.gguf": ["mradermacher/L3-SunnySkyy-8B-GGUF", MessagesFormatterType.LLAMA_3],
133
+ "Xortron7MethedUp.Q5_K_M.gguf": ["mradermacher/Xortron7MethedUp-GGUF", MessagesFormatterType.LLAMA_3],
134
+ "l3.1-hermesbreakgradient-8b-q8_0.gguf": ["rityak/L3.1-HermesBreakGradient-8B-Q8_0-GGUF", MessagesFormatterType.LLAMA_3],
135
+ "hematoma-gemma-model-stock-9b-q6_k.gguf": ["Nekuromento/Hematoma-Gemma-Model-Stock-9B-Q6_K-GGUF", MessagesFormatterType.ALPACA],
136
+ "l3.1-darkstock-8b-q8_0.gguf": ["rityak/L3.1-DarkStock-8B-Q8_0-GGUF", MessagesFormatterType.MISTRAL],
137
+ "evolutionfimbmoister-11-q4_k_m.gguf": ["ClaudioItaly/EvolutionFimbMoister-11-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
138
+ "IceTea21EnergyDrinkRPV13-DPOv3.Q5_K_M.gguf": ["mradermacher/IceTea21EnergyDrinkRPV13-DPOv3-GGUF", MessagesFormatterType.MISTRAL],
139
+ "top11evolution-q4_k_m.gguf": ["ClaudioItaly/Top11Evolution-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
140
+ "L3.1-FormaxGradient.Q5_K_M.gguf": ["mradermacher/L3.1-FormaxGradient-GGUF", MessagesFormatterType.LLAMA_3],
141
+ "IceEspressoRPv1-7b.i1-Q5_K_M.gguf": ["mradermacher/IceEspressoRPv1-7b-i1-GGUF", MessagesFormatterType.MISTRAL],
142
+ "Ph3della3-14B.Q4_K_M.gguf": ["mradermacher/Ph3della3-14B-GGUF", MessagesFormatterType.PHI_3],
143
+ "Llama-3-bllossom-8B-PM1-finetuned-v1-15.Q5_K_M.gguf": ["mradermacher/Llama-3-bllossom-8B-PM1-finetuned-v1-15-GGUF", MessagesFormatterType.LLAMA_3],
144
+ "Evolutionstory-7B-v3.0.Q5_K_M.gguf": ["mradermacher/Evolutionstory-7B-v3.0-GGUF", MessagesFormatterType.MISTRAL],
145
+ "L3.1-Niitorm-8B-t0.0001.i1.imatrix-Q8_0.gguf": ["v000000/L3.1-Niitorm-8B-t0.0001-i1-imatrix-Q8_0-GGUF", MessagesFormatterType.LLAMA_3],
146
+ "lyris_dev_2-q6_k.gguf": ["v000000/NM-12B-Lyris-dev-2-Q6_K-GGUF", MessagesFormatterType.MISTRAL],
147
+ "nm-12b-lyris-dev-3-q6_k.gguf": ["v000000/NM-12B-Lyris-dev-3-Q6_K-GGUF", MessagesFormatterType.MISTRAL],
148
+ "sensibilitystory-7b-q5_k_m.gguf": ["ClaudioItaly/Sensibilitystory-7B-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
149
+ "NeuralExperiment-7b-MagicCoder-v7.5.i1-Q5_K_M.gguf": ["mradermacher/NeuralExperiment-7b-MagicCoder-v7.5-i1-GGUF", MessagesFormatterType.MISTRAL],
150
+ "dolphin-2.2.1-mistral-7b.Q5_K_M.gguf": ["mradermacher/dolphin-2.2.1-mistral-7b-GGUF", MessagesFormatterType.MISTRAL],
151
+ "L3-Dark-Planet-Horror-City-8B-NEO-IMAT-D_AU-Q5_K_M-imat.gguf": ["DavidAU/L3-Dark-Planet-Horror-City-8B-NEO-Imatrix-GGUF", MessagesFormatterType.LLAMA_3],
152
+ "hyperllama-3.1-8b-iq4_nl-imat.gguf": ["bunnycore/HyperLlama-3.1-8B-IQ4_NL-GGUF", MessagesFormatterType.LLAMA_3],
153
+ "SystemAI-7B.Q5_K_M.gguf": ["mradermacher/SystemAI-7B-GGUF", MessagesFormatterType.MISTRAL],
154
+ "OpenCrystal-15B-L3-v3.Q4_K_M.gguf": ["mradermacher/OpenCrystal-15B-L3-v3-GGUF", MessagesFormatterType.LLAMA_3],
155
+ "Enzo.Q4_K_M.gguf": ["mradermacher/Enzo-GGUF", MessagesFormatterType.MISTRAL],
156
+ "CelineGPT-12B-v0.1.Q4_K_M.gguf": ["mradermacher/CelineGPT-12B-v0.1-GGUF", MessagesFormatterType.MISTRAL],
157
+ "Lama-DPOlphin-8B.Q5_K_M.gguf": ["mradermacher/Lama-DPOlphin-8B-GGUF", MessagesFormatterType.LLAMA_3],
158
+ "L3.1-Artemis-a-8B.i1-Q4_K_S.gguf": ["mradermacher/L3.1-Artemis-a-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
159
+ "L3.1-Artemis-b-8B.Q5_K_M.gguf": ["mradermacher/L3.1-Artemis-b-8B-GGUF", MessagesFormatterType.LLAMA_3],
160
+ "L3.1-Artemis-c-8B.Q4_K_S.gguf": ["mradermacher/L3.1-Artemis-c-8B-GGUF", MessagesFormatterType.LLAMA_3],
161
+ "L3.1-Vulca-Umboshima-8B-bf16.i1-Q4_K_M.gguf": ["mradermacher/L3.1-Vulca-Umboshima-8B-bf16-i1-GGUF", MessagesFormatterType.LLAMA_3],
162
+ "L3.1-Vulca-Umboshima-8B.i1-Q5_K_M.gguf": ["mradermacher/L3.1-Vulca-Umboshima-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
163
+ "Mistral-Nemo-Instruct-2407-abliterated.i1-Q4_K_M.gguf": ["mradermacher/Mistral-Nemo-Instruct-2407-abliterated-i1-GGUF", MessagesFormatterType.MISTRAL],
164
+ "L3-Dark-Planet-8B-D_AU-Q5_k_s.gguf": ["DavidAU/L3-Dark-Planet-8B-GGUF", MessagesFormatterType.LLAMA_3],
165
+ "L3-Dark-Planet-Ring-World-8B-F32-D_AU-Q4_k_s.gguf": ["DavidAU/L3-Dark-Planet-Ring-World-8B-F32-GGUF", MessagesFormatterType.LLAMA_3],
166
+ "penny-lexi-llama3.1-8B.Q5_K_M.gguf": ["mradermacher/penny-lexi-llama3.1-8B-GGUF", MessagesFormatterType.LLAMA_3],
167
+ "penny-llama3.1-dolphin2.9.4.Q5_K_M.gguf": ["mradermacher/penny-llama3.1-dolphin2.9.4-GGUF", MessagesFormatterType.MISTRAL],
168
+ "zirel3.1.Q5_K_M.gguf": ["mradermacher/zirel3.1-GGUF", MessagesFormatterType.LLAMA_3],
169
+ "L3.1-8B-komorebi-v0.1-Q5_K_M.gguf": ["Vdr1/L3.1-8B-komorebi-v0.1-GGUF-IQ", MessagesFormatterType.LLAMA_3],
170
+ "Lumimaid-Rocinate-v0.1.Q4_K_M.gguf": ["mradermacher/Lumimaid-Rocinate-v0.1-GGUF", MessagesFormatterType.MISTRAL],
171
+ "Athlon-8B-0.1.Q5_K_M.gguf": ["mradermacher/Athlon-8B-0.1-GGUF", MessagesFormatterType.LLAMA_3],
172
+ "Phenom-12B-0.1.i1-Q5_K_M.gguf": ["mradermacher/Phenom-12B-0.1-i1-GGUF", MessagesFormatterType.MISTRAL],
173
+ "Ph3della-14B.Q4_K_S.gguf": ["mradermacher/Ph3della-14B-GGUF", MessagesFormatterType.PHI_3],
174
+ "alfacentaury11b-q5_k_m.gguf": ["ClaudioItaly/AlfaCentaury11B-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
175
+ "L3-Dark-Planet-8B-NEO-IMAT-D_AU-Q5_K_M-imat.gguf": ["DavidAU/L3-Dark-Planet-8B-NEO-Imatrix-GGUF", MessagesFormatterType.LLAMA_3],
176
+ "penny-dolphin2.9.4.gguf": ["giannisan/penny-dolphin2.9.4-llama3.1-GGUF", MessagesFormatterType.MISTRAL],
177
+ "systemhd-7b-q5_k_m.gguf": ["ClaudioItaly/SystemHD-7B-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
178
+ "lit_red_storm-q4_k_m.gguf": ["Daemontatox/lit_red_storm-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
179
+ "llama-3-youko-8b-instruct.Q5_K_M.gguf": ["mradermacher/llama-3-youko-8b-instruct-GGUF", MessagesFormatterType.LLAMA_3],
180
+ "Fimbulvetr-11B-v2.Q5_K_M.gguf": ["QuantFactory/Fimbulvetr-11B-v2-GGUF", MessagesFormatterType.VICUNA],
181
+ "ogha-8b-model_stock-q4_k_m.gguf": ["DreadPoor/Ogha-8B-model_stock-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
182
+ "westicelemontearp-32k-7b-q5_k_s-imat.gguf": ["woofwolfy/WestIceLemonTeaRP-32k-7b-Q5_K_S-GGUF-Imatrix", MessagesFormatterType.ALPACA],
183
+ "ArliAI-RPMax-12B-v1.1-Q4_K_M.gguf": ["ArliAI/ArliAI-RPMax-12B-v1.1-GGUF", MessagesFormatterType.MISTRAL],
184
+ "l3-8b-serpentine-q4_k_m-imat.gguf": ["woofwolfy/L3-8B-Serpentine-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
185
+ "llama-3-alpha-centauri-v0.1-q4_k_m-imat.gguf": ["woofwolfy/Llama-3-Alpha-Centauri-v0.1-Q4_K_M-GGUF-Imatrix", MessagesFormatterType.LLAMA_3],
186
+ "OpenCrystal-15B-L3-v2.i1-Q4_K_M.gguf": ["mradermacher/OpenCrystal-15B-L3-v2-i1-GGUF", MessagesFormatterType.LLAMA_3],
187
+ "Gukbap-Mistral-7B.i1-Q4_K_M.gguf": ["mradermacher/Gukbap-Mistral-7B-i1-GGUF", MessagesFormatterType.MISTRAL],
188
+ "Gukbap-Qwen2-7B.i1-Q4_K_M.gguf": ["mradermacher/Gukbap-Qwen2-7B-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
189
+ "Irina-8B-model_stock.Q5_K_M.gguf": ["mradermacher/Irina-8B-model_stock-GGUF", MessagesFormatterType.MISTRAL],
190
+ "silicon-alice-7b-q5_k_m-imat.gguf": ["woofwolfy/Silicon-Alice-7B-Q5_K_M-GGUF-Imatrix", MessagesFormatterType.MISTRAL],
191
+ "l3.1-8b-rpgrammax-q5_k_m.gguf": ["djuna/L3.1-8B-RPGramMax-Q5_K_M-GGUF", MessagesFormatterType.LLAMA_3],
192
+ "Instant-RP-Noodles-12B-v1.2.Q4_K_M.gguf": ["mradermacher/Instant-RP-Noodles-12B-v1.2-GGUF", MessagesFormatterType.MISTRAL],
193
+ "Ellaria-9B-Q4_K_M.gguf": ["bartowski/Ellaria-9B-GGUF", MessagesFormatterType.ALPACA],
194
+ "evolutionstory-7b-v2.2-q5_k_m.gguf": ["ClaudioItaly/Evolutionstory-7B-v2.2-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
195
+ "NeuroNoise_v2.Q6_K.gguf": ["mradermacher/NeuroNoise_v2-GGUF", MessagesFormatterType.CHATML],
196
+ "daybreak-kunoichi-2dpo-7b.i1-Q5_K_M.gguf": ["mradermacher/daybreak-kunoichi-2dpo-7b-i1-GGUF", MessagesFormatterType.MISTRAL],
197
+ "l3.1-niitorm-8b-latcosx2-q8_0.gguf": ["v000000/L3.1-Niitorm-8B-LATCOSx2-Version-Q8_0-GGUF", MessagesFormatterType.MISTRAL],
198
+ "IceTea21EnergyDrinkRPV13-dpo240.Q4_K_M.gguf": ["mradermacher/IceTea21EnergyDrinkRPV13-dpo240-GGUF", MessagesFormatterType.MISTRAL],
199
+ "topneuralstory-q6_k.gguf": ["ClaudioItaly/TopNeuralStory-Q6_K-GGUF", MessagesFormatterType.MISTRAL],
200
+ #"L3.1-Ablaze-Vulca-v0.2h-8B.q8_0.gguf": ["kromquant/L3.1-Ablaze-Vulca-v0.2h-8B-GGUFs", MessagesFormatterType.MISTRAL],
201
+ "Fireball-12B-v1.13a-philosophers.Q4_K_M.gguf": ["mradermacher/Fireball-12B-v1.13a-philosophers-GGUF", MessagesFormatterType.MISTRAL],
202
+ "l3.1-niitorm-8b-q8_0.gguf": ["v000000/L3.1-Niitorm-8B-t0.0001-Q8_0-GGUF", MessagesFormatterType.MISTRAL],
203
+ "mn-12b-lyra-v3-q5_k_m.gguf": ["HalleyStarbun/MN-12B-Lyra-v3-Q5_K_M-GGUF", MessagesFormatterType.CHATML],
204
+ "mergekit-model_stock-ffibbcs-q4_k_s.gguf": ["DarwinAnim8or/mergekit-model_stock-ffibbcs-Q4_K_S-GGUF", MessagesFormatterType.ALPACA],
205
+ "experiment-wip-being-q4_k_m.gguf": ["DreadPoor/EXPERIMENT-WIP-BEING-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
206
+ "Sunmoy-9B-G2.Q4_K_M.gguf": ["backyardai/Sunmoy-9B-G2-GGUF", MessagesFormatterType.ALPACA],
207
+ "kainaticulous-rp-7b.Q5_K_M.gguf": ["mradermacher/kainaticulous-rp-7b-GGUF", MessagesFormatterType.MISTRAL],
208
+ "OpenCrystal-12B-L3.1-128K.Q4_K_S.gguf": ["mradermacher/OpenCrystal-12B-L3.1-128K-GGUF", MessagesFormatterType.MISTRAL],
209
+ "L3.1-12B-Niitama-v1.1.Q4_K_S.gguf": ["mradermacher/L3.1-12B-Niitama-v1.1-GGUF", MessagesFormatterType.MISTRAL],
210
+ "L3.1-12B-Celeste-V1.5.Q4_K_S.gguf": ["mradermacher/L3.1-12B-Celeste-V1.5-GGUF", MessagesFormatterType.MISTRAL],
211
+ "L3.1-Sthenorm-8B.Q4_K_S.gguf": ["mradermacher/L3.1-Sthenorm-8B-GGUF", MessagesFormatterType.MISTRAL],
212
+ "MN-12B-Estrella-v2.2.Q4_K_M.gguf": ["mradermacher/MN-12B-Estrella-v2.2-GGUF", MessagesFormatterType.MISTRAL],
213
+ "WoonaV1.2-9b.Q4_K_M.gguf": ["QuantFactory/WoonaV1.2-9b-GGUF", MessagesFormatterType.MISTRAL],
214
+ "kainaticulous-rp-7B-Q5_K_M.gguf": ["kainatq/kainaticulous-rp-7b-gguf", MessagesFormatterType.MISTRAL],
215
+ "experiment-wip-q4_k_m.gguf": ["DreadPoor/EXPERIMENT-WIP-Q4_K_M-GGUF", MessagesFormatterType.CHATML],
216
+ "eye_of_the_storm-q4_k_m.gguf": ["Daemontatox/Eye_of_the_Storm-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
217
+ "Fireball-3.1-8B-ORPO.i1-Q5_K_M.gguf": ["mradermacher/Fireball-3.1-8B-ORPO-i1-GGUF", MessagesFormatterType.LLAMA_3],
218
+ "ggml-model-Q4_K_M.gguf": ["lightblue/suzume-llama-3-8B-multilingual-orpo-borda-half-gguf", MessagesFormatterType.LLAMA_3],
219
+ "kukulemon-v3-soul_mix-32k-7B.Q5_K_M.gguf": ["grimjim/kukulemon-v3-soul_mix-32k-7B-GGUF", MessagesFormatterType.MISTRAL],
220
+ #"CestLumi-v2.Q4_K_M.gguf": ["mradermacher/CestLumi-v2-GGUF", MessagesFormatterType.CHATML], # incomplete merges according to the author
221
+ "zephyr-wizard-kuno-royale-BF16-merge-7B.Q5_K_M.gguf": ["grimjim/zephyr-wizard-kuno-royale-BF16-merge-7B-GGUF", MessagesFormatterType.MISTRAL],
222
+ "Llama-3-Luminurse-v0.2-OAS-8B.Q5_K_M.gguf": ["grimjim/Llama-3-Luminurse-v0.2-OAS-8B-GGUF", MessagesFormatterType.LLAMA_3],
223
+ #"L3.1-Ablaze-Vulca-v0.2b-8B.q8_0.gguf": ["kromquant/L3.1-Ablaze-Vulca-v0.2b-8B-GGUFs", MessagesFormatterType.MISTRAL],
224
+ "MN-12B-Lyra-v2a1.Q4_K_M.gguf": ["mradermacher/MN-12B-Lyra-v2a1-GGUF", MessagesFormatterType.MISTRAL],
225
+ "OpenCrystal-8B-L3.Q5_K_M.gguf": ["mradermacher/OpenCrystal-8B-L3-GGUF", MessagesFormatterType.LLAMA_3],
226
+ "MN-CelesteGold-12B-Merge.Q4_K_M.gguf": ["mradermacher/MN-CelesteGold-12B-Merge-GGUF", MessagesFormatterType.CHATML],
227
+ "L3.1-Suze-Vume-calc.Q5_K_M.gguf": ["mradermacher/L3.1-Suze-Vume-calc-GGUF", MessagesFormatterType.MISTRAL],
228
+ "OpenCrystal-8B-L3.i1-Q4_K_M.gguf": ["mradermacher/OpenCrystal-8B-L3-i1-GGUF", MessagesFormatterType.LLAMA_3],
229
+ "cestlumi-v2-q4_k_m.gguf": ["TheDrunkenSnail/CestLumi-v2-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
230
+ "cestlumi-v1-q4_k_m.gguf": ["TheDrunkenSnail/CestLumi-v1-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
231
+ "Lyra_Gutenbergs-Twilight_Magnum-12B-Q4_K_M.gguf": ["bartowski/Lyra_Gutenbergs-Twilight_Magnum-12B-GGUF", MessagesFormatterType.MISTRAL],
232
+ "llama-3.1-8b-ultra-instruct-q4_k_s-imat.gguf": ["Dampfinchen/Llama-3.1-8B-Ultra-Instruct-Q4_K_S-GGUF", MessagesFormatterType.LLAMA_3],
233
+ "kunoichi-lemon-royale-v3-32K-7B.Q5_K_M.gguf": ["grimjim/kunoichi-lemon-royale-v3-32K-7B-GGUF", MessagesFormatterType.MISTRAL],
234
+ "kukulemon-spiked-9B.Q4_K_M.gguf": ["grimjim/kukulemon-spiked-9B-GGUF", MessagesFormatterType.MISTRAL],
235
+ "lemonade-rebase-32k-7B.Q8_0.gguf": ["grimjim/lemonade-rebase-32k-7B-GGUF", MessagesFormatterType.MISTRAL],
236
+ "infinite-lemonade-7B.Q8_0.gguf": ["grimjim/infinite-lemonade-SLERP-7B-GGUF", MessagesFormatterType.MISTRAL],
237
+ "llama-3-merge-pp-instruct-8B.Q8_0.gguf": ["grimjim/llama-3-merge-pp-instruct-8B-GGUF", MessagesFormatterType.LLAMA_3],
238
+ "L3-SthenoMaidBlackroot-12.2B-V1-INSTRUCT-16fp.i1-Q4_K_S.gguf": ["mradermacher/L3-SthenoMaidBlackroot-12.2B-V1-INSTRUCT-16fp-i1-GGUF", MessagesFormatterType.LLAMA_3],
239
+ "Fireball-Mistral-Nemo-Base-2407-Instruct-v1.Q4_K_M.gguf": ["mradermacher/Fireball-Mistral-Nemo-Base-2407-Instruct-v1-GGUF", MessagesFormatterType.MISTRAL],
240
+ "L3-12B-Niitama-v1.Q4_K_M.gguf": ["mradermacher/L3-12B-Niitama-v1-GGUF", MessagesFormatterType.LLAMA_3],
241
+ "L3-12B-Celeste-V1.2.Q4_K_M.gguf": ["mradermacher/L3-12B-Celeste-V1.2-GGUF", MessagesFormatterType.LLAMA_3],
242
+ "OpenCrystal-12B-L3.Q4_K_M.gguf": ["mradermacher/OpenCrystal-12B-L3-GGUF", MessagesFormatterType.LLAMA_3],
243
+ "L3-Stheno-v3.2-12.2B-Instruct.Q4_K_M.gguf": ["mradermacher/L3-Stheno-v3.2-12.2B-Instruct-GGUF", MessagesFormatterType.LLAMA_3],
244
+ "L3-SMB-Instruct-12.2B-F32.Q4_K_M.gguf": ["mradermacher/L3-SMB-Instruct-12.2B-F32-GGUF", MessagesFormatterType.LLAMA_3],
245
+ "OpenCrystal-12B-L3.i1-Q4_K_M.gguf": ["mradermacher/OpenCrystal-12B-L3-i1-GGUF", MessagesFormatterType.LLAMA_3],
246
+ "L3-Lumimaid-12.2B-v0.1-OAS-Instruct.Q4_K_M.gguf": ["mradermacher/L3-Lumimaid-12.2B-v0.1-OAS-Instruct-GGUF", MessagesFormatterType.LLAMA_3],
247
+ "nemo-12b-hiwaifu-Q4_K_L-imat.gguf": ["Reiterate3680/nemo-12b-hiwaifu-GGUF", MessagesFormatterType.MISTRAL],
248
+ "Soliloquy-7B-v3-Q4_K_L-imat.gguf": ["Reiterate3680/Soliloquy-7B-v3-GGUF", MessagesFormatterType.OPEN_CHAT],
249
+ "Lyra-Gutenberg-mistral-nemo-12B.Q4_K_M.gguf": ["mradermacher/Lyra-Gutenberg-mistral-nemo-12B-GGUF", MessagesFormatterType.MISTRAL],
250
+ "Gutensuppe-mistral-nemo-12B.Q4_K_M.gguf": ["mradermacher/Gutensuppe-mistral-nemo-12B-GGUF", MessagesFormatterType.MISTRAL],
251
+ "IceTea21EnergyDrinkRPV13-dpo240-Q8_0.gguf": ["icefog72/IceTea21EnergyDrinkRPV13-dpo240-gguf", MessagesFormatterType.MISTRAL],
252
+ "Instant-RP-Noodles-12B.Q4_K_M.gguf": ["mradermacher/Instant-RP-Noodles-12B-GGUF", MessagesFormatterType.MISTRAL],
253
+ "Violet_Twilight-v0.1_q4_K_M.gguf": ["Epiculous/Violet_Twilight-v0.1-GGUF", MessagesFormatterType.MISTRAL],
254
+ "Llama3.1-vodka.Q4_K_S.gguf": ["mradermacher/Llama3.1-vodka-GGUF", MessagesFormatterType.MISTRAL],
255
+ "L3.1-Pyro-Mantus-v0.1c-8B.q5_k_m.gguf": ["kromquant/L3.1-Pyro-Mantus-v0.1c-8B-GGUFs", MessagesFormatterType.MISTRAL],
256
+ "Llama-3.1-8B-ArliAI-RPMax-v1.1-Q5_K_M.gguf": ["ArliAI/Llama-3.1-8B-ArliAI-RPMax-v1.1-GGUF", MessagesFormatterType.MISTRAL],
257
+ "l3-notcrazy-8b-q4_k_m.gguf": ["bunnycore/L3-NotCrazy-8B-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
258
+ "Maverick-8B.Q5_K_M.gguf": ["RichardErkhov/bunnycore_-_Maverick-8B-gguf", MessagesFormatterType.LLAMA_3],
259
+ "Fireball-12B-v1.01a.Q4_K_M.gguf": ["mradermacher/Fireball-12B-v1.01a-GGUF", MessagesFormatterType.CHATML],
260
+ "Loki-v5.2.Q5_K_M.gguf": ["mradermacher/Loki-v5.2-GGUF", MessagesFormatterType.MISTRAL],
261
+ "Loki-v5.1.Q5_K_M.gguf": ["mradermacher/Loki-v5.1-GGUF", MessagesFormatterType.MISTRAL],
262
+ "GracieRP-freefallenLora-Gemma2-Inst-9B.i1-Q4_K_M.gguf": ["mradermacher/GracieRP-freefallenLora-Gemma2-Inst-9B-i1-GGUF", MessagesFormatterType.ALPACA],
263
+ "mistral-nemo-gutenberg-12B-v4.Q4_K_M.gguf": ["mradermacher/mistral-nemo-gutenberg-12B-v4-GGUF", MessagesFormatterType.MISTRAL],
264
+ "FunkyMerge-12b-0.1.Q4_K_M.gguf": ["mradermacher/FunkyMerge-12b-0.1-GGUF", MessagesFormatterType.MISTRAL],
265
+ "NemoMix-Unleashed-12B-Q4_K_M.gguf": ["bartowski/NemoMix-Unleashed-12B-GGUF", MessagesFormatterType.MISTRAL],
266
+ "IceTea21EnergyDrinkRPV13.Q4_K_S.gguf": ["mradermacher/IceTea21EnergyDrinkRPV13-GGUF", MessagesFormatterType.MISTRAL],
267
+ "MegaBeam-Mistral-7B-512k-Q5_K_M.gguf": ["bartowski/MegaBeam-Mistral-7B-512k-GGUF", MessagesFormatterType.MISTRAL],
268
+ "azur-8b-model_stock-q4_k_m.gguf": ["DreadPoor/Azur-8B-model_stock-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
269
+ "Chronos-Gold-12B-1.0-Q4_K_M.gguf": ["bartowski/Chronos-Gold-12B-1.0-GGUF", MessagesFormatterType.MISTRAL],
270
+ "L3.1-Romes-Ninomos-Maxxing.Q5_K_M.gguf": ["mradermacher/L3.1-Romes-Ninomos-Maxxing-GGUF", MessagesFormatterType.LLAMA_3],
271
+ "mistral-nemo-minitron-8b-base-q4_k_m.gguf": ["Daemontatox/Mistral-NeMo-Minitron-8B-Base-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
272
+ "Nokstella_coder-8B-model_stock.i1-Q4_K_S.gguf": ["mradermacher/Nokstella_coder-8B-model_stock-i1-GGUF", MessagesFormatterType.LLAMA_3],
273
+ "vtion_model_v1.Q5_K_M.gguf": ["mradermacher/vtion_model_v1-GGUF", MessagesFormatterType.LLAMA_3],
274
+ "storiecreative-q5_k_m.gguf": ["ClaudioItaly/StorieCreative-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
275
+ "L3.1-gramamax.Q5_K_M.gguf": ["mradermacher/L3.1-gramamax-GGUF", MessagesFormatterType.MISTRAL],
276
+ "Evolutionstory128.Q5_K_M.gguf": ["mradermacher/Evolutionstory128-GGUF", MessagesFormatterType.CHATML],
277
+ "sellen-8b-model_stock-q4_k_m.gguf": ["DreadPoor/Sellen-8B-model_stock-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
278
+ "nokstella_coder-8b-model_stock-q4_k_m.gguf": ["DreadPoor/Nokstella_coder-8B-model_stock-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
279
+ "Ultra-Instruct-12B-Q4_K_M.gguf": ["bartowski/Ultra-Instruct-12B-GGUF", MessagesFormatterType.MISTRAL],
280
+ "L3.1-Sithamo-v0.4-8B.q5_k_m.gguf": ["kromquant/L3.1-Siithamo-v0.4-8B-GGUFs", MessagesFormatterType.MISTRAL],
281
+ "Berry-Spark-7B-Fix.Q5_K_M.gguf": ["mradermacher/Berry-Spark-7B-Fix-GGUF", MessagesFormatterType.OPEN_CHAT],
282
+ "llama3.1-gutenberg-8B.Q4_K_S.gguf": ["mradermacher/llama3.1-gutenberg-8B-GGUF", MessagesFormatterType.LLAMA_3],
283
+ "L3.1-Romes-Ninomos.Q4_K_S.gguf": ["mradermacher/L3.1-Romes-Ninomos-GGUF", MessagesFormatterType.LLAMA_3],
284
+ "nemo-12b-summarizer-de-v3.Q4_K_M.gguf": ["mradermacher/nemo-12b-summarizer-de-v3-GGUF", MessagesFormatterType.MISTRAL],
285
+ "suzume-llama-3-8B-multilingual-orpo-borda-top25.Q5_K_M.gguf": ["darkshapes/suzume-llama-3-8B-multilingual-orpo-borda-top25-gguf", MessagesFormatterType.LLAMA_3],
286
+ "Fireball-Mistral-Nemo-Base-2407-sft-v2.1.Q4_K_M.gguf": ["mradermacher/Fireball-Mistral-Nemo-Base-2407-sft-v2.1-GGUF", MessagesFormatterType.MISTRAL],
287
+ "gemma-2-9B-it-advanced-v2.1-Q5_K_M.gguf": ["jsgreenawalt/gemma-2-9B-it-advanced-v2.1-GGUF", MessagesFormatterType.ALPACA],
288
+ "mistral-12b-neptune-6k-instruct.Q4_K_M.gguf": ["mradermacher/mistral-12b-neptune-6k-instruct-GGUF", MessagesFormatterType.MISTRAL],
289
+ "evolutionstory-q5_k_m.gguf": ["ClaudioItaly/Evolutionstory-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
290
+ "AuraFinal12B-Q4_K_L-imat.gguf": ["Reiterate3680/AuraFinal12B-GGUF", MessagesFormatterType.MISTRAL],
291
+ "Hollow-Tail-V1-12B-Q5_K_M.gguf": ["starble-dev/Hollow-Tail-V1-12B-GGUF", MessagesFormatterType.MISTRAL],
292
+ "IceSakeRPTrainingTestV1-7b.Q5_K_M.gguf": ["mradermacher/IceSakeRPTrainingTestV1-7b-GGUF", MessagesFormatterType.MISTRAL],
293
+ "IceTea21EnergyDrinkRPV10.Q5_K_M.gguf": ["mradermacher/IceTea21EnergyDrinkRPV10-GGUF", MessagesFormatterType.MISTRAL],
294
+ "MN-LooseCannon-12B-v2-Q4_K_L-imat.gguf": ["Reiterate3680/MN-LooseCannon-12B-v2-GGUF", MessagesFormatterType.CHATML],
295
+ "MN-MT3-m4-12B-Q4_K_L-imat.gguf": ["Reiterate3680/MN-MT3-m4-12B-GGUF", MessagesFormatterType.CHATML],
296
+ "Mahou-Gutenberg-Nemo-12B.Q4_K_M.gguf": ["mradermacher/Mahou-Gutenberg-Nemo-12B-GGUF", MessagesFormatterType.MISTRAL],
297
+ "Mahou-1.3-llama3.1-8B.Q5_K_M.gguf": ["mradermacher/Mahou-1.3-llama3.1-8B-GGUF", MessagesFormatterType.CHATML],
298
+ "gemma-advanced-v1.Q4_K_M.gguf": ["QuantFactory/gemma-advanced-v1-GGUF", MessagesFormatterType.ALPACA],
299
+ "flammen21X-mistral-7B-Q5_K_M.gguf": ["duyntnet/flammen21X-mistral-7B-imatrix-GGUF", MessagesFormatterType.MISTRAL],
300
+ "Magnum-Instruct-DPO-12B.Q4_K_M.gguf": ["mradermacher/Magnum-Instruct-DPO-12B-GGUF", MessagesFormatterType.MISTRAL],
301
+ "Carasique-v0.3b.Q4_K_S.gguf": ["mradermacher/Carasique-v0.3b-GGUF", MessagesFormatterType.MISTRAL],
302
+ "MN-12b-Sunrose-Q4_K_L-imat.gguf": ["Reiterate3680/MN-12b-Sunrose-GGUF", MessagesFormatterType.MISTRAL],
303
+ "OpenChat-3.5-7B-SOLAR-v2.0.i1-Q4_K_M.gguf": ["mradermacher/OpenChat-3.5-7B-SOLAR-v2.0-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
304
+ "Carasique-v0.3.Q4_K_M.gguf": ["mradermacher/Carasique-v0.3-GGUF", MessagesFormatterType.MISTRAL],
305
+ "Crimson_Dawn-V0.1.Q4_K_M.gguf": ["mradermacher/Crimson_Dawn-V0.1-GGUF", MessagesFormatterType.MISTRAL],
306
+ "Samantha-hermes3-8b-model-fixed.i1-Q5_K_M.gguf": ["mradermacher/Samantha-hermes3-8b-model-fixed-i1-GGUF", MessagesFormatterType.MISTRAL],
307
+ "Hermes-3-Llama-3.1-8B-lorablated-Q5_K_M.gguf": ["bartowski/Hermes-3-Llama-3.1-8B-lorablated-GGUF", MessagesFormatterType.LLAMA_3],
308
+ "stratagem-instruct-12b.i1-Q4_K_M.gguf": ["mradermacher/stratagem-instruct-12b-i1-GGUF", MessagesFormatterType.MISTRAL],
309
+ "omed-llama3.1-8b.Q5_K_M.gguf": ["mradermacher/omed-llama3.1-8b-GGUF", MessagesFormatterType.LLAMA_3],
310
+ "omed-gemma2-9b.i1-Q4_K_M.gguf": ["mradermacher/omed-gemma2-9b-i1-GGUF", MessagesFormatterType.ALPACA],
311
+ "L3.1-Siithamo-v0.3-8B.q5_k_m.gguf": ["kromquant/L3.1-Siithamo-v0.3-8B-GGUFs", MessagesFormatterType.LLAMA_3],
312
+ "mistral-nemo-gutenberg-12B-v3.i1-Q4_K_M.gguf": ["mradermacher/mistral-nemo-gutenberg-12B-v3-i1-GGUF", MessagesFormatterType.MISTRAL],
313
+ "MN-12B-Tarsus-Q4_K_L-imat.gguf": ["Reiterate3680/MN-12B-Tarsus-GGUF", MessagesFormatterType.MISTRAL],
314
+ "Magnum-Instruct-12B.Q4_K_M.gguf": ["mradermacher/Magnum-Instruct-12B-GGUF", MessagesFormatterType.MISTRAL],
315
+ "Rocinante-12B-v1.i1-Q4_K_M.gguf": ["mradermacher/Rocinante-12B-v1-i1-GGUF", MessagesFormatterType.MISTRAL],
316
+ "Llama-3.1-Storm-8B-Q5_K_M.gguf": ["bartowski/Llama-3.1-Storm-8B-GGUF", MessagesFormatterType.MISTRAL],
317
+ "Tess-3-Mistral-Nemo-12B.i1-Q4_K_M.gguf": ["mradermacher/Tess-3-Mistral-Nemo-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
318
+ "Hermes-3-Llama-3.1-8B.Q5_K_M.gguf": ["mradermacher/Hermes-3-Llama-3.1-8B-GGUF", MessagesFormatterType.MISTRAL],
319
+ "Roleplay-Hermes-3-Llama-3.1-8B.i1-Q5_K_M.gguf": ["mradermacher/Roleplay-Hermes-3-Llama-3.1-8B-i1-GGUF", MessagesFormatterType.MISTRAL],
320
+ "Dusk_Rainbow_Ep03-Q5_K_M.gguf": ["SicariusSicariiStuff/Dusk_Rainbow_GGUFs", MessagesFormatterType.LLAMA_3],
321
+ "NemoReRemix-12B-Q4_K_M.gguf": ["bartowski/NemoReRemix-12B-GGUF", MessagesFormatterType.MISTRAL],
322
+ "Aura-NeMo-12B-Q4_K_L-imat.gguf": ["Reiterate3680/Aura-NeMo-12B-GGUF", MessagesFormatterType.MISTRAL],
323
+ "TypeII-12B.Q4_K_S.gguf": ["mradermacher/TypeII-12B-GGUF", MessagesFormatterType.MISTRAL],
324
+ "TypeII-A-12B.Q4_K_M.gguf": ["mradermacher/TypeII-A-12B-GGUF", MessagesFormatterType.CHATML],
325
+ "yuna-ai-v3-atomic-q_4_k_m.gguf": ["yukiarimo/yuna-ai-v3-atomic", MessagesFormatterType.CHATML],
326
+ "Peach-9B-8k-Roleplay-Q4_K_M.gguf": ["bartowski/Peach-9B-8k-Roleplay-GGUF", MessagesFormatterType.LLAMA_3],
327
+ "heartstolen_model-stock_8b-q4_k_m.gguf": ["DreadPoor/HeartStolen_model-stock_8B-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
328
+ "Llama-3.1-8B-ArliAI-Formax-v1.0-Q5_K_M.gguf": ["ArliAI/Llama-3.1-8B-ArliAI-Formax-v1.0-GGUF", MessagesFormatterType.MISTRAL],
329
+ "ArliAI-Llama-3-8B-Formax-v1.0-Q5_K_M.gguf": ["ArliAI/ArliAI-Llama-3-8B-Formax-v1.0-GGUF", MessagesFormatterType.LLAMA_3],
330
+ "Llama-3.1-8B-ArliAI-RPMax-v1.0-Q5_K_M.gguf": ["ArliAI/Llama-3.1-8B-ArliAI-RPMax-v1.0-GGUF", MessagesFormatterType.MISTRAL],
331
+ "badger-writer-llama-3-8b-q4_k_m.gguf": ["A2va/badger-writer-llama-3-8b-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
332
+ "magnum-12b-v2.5-kto-Q4_K_L-imat.gguf": ["Reiterate3680/magnum-12b-v2.5-kto-GGUF", MessagesFormatterType.CHATML],
333
+ "CeleMo-Instruct-128k.Q4_K_S.gguf": ["mradermacher/CeleMo-Instruct-128k-GGUF", MessagesFormatterType.CHATML],
334
+ "KukulStanta-7B-Seamaiiza-7B-v1-slerp-merge.q3_k_l.gguf": ["AlekseiPravdin/KukulStanta-7B-Seamaiiza-7B-v1-slerp-merge-gguf", MessagesFormatterType.MISTRAL],
335
+ "HolyNemo-12B.Q4_K_M.gguf": ["mradermacher/HolyNemo-12B-GGUF", MessagesFormatterType.MISTRAL],
336
+ "mistral-nemo-gutenberg-12B-v2.Q4_K_M.gguf": ["mradermacher/mistral-nemo-gutenberg-12B-v2-GGUF", MessagesFormatterType.MISTRAL],
337
+ "KukulStanta-InfinityRP-7B-slerp.Q5_K_M.gguf": ["mradermacher/KukulStanta-InfinityRP-7B-slerp-GGUF", MessagesFormatterType.MISTRAL],
338
+ "Rocinante-12B-v1a-Q4_K_M.gguf": ["BeaverAI/Rocinante-12B-v1a-GGUF", MessagesFormatterType.MISTRAL],
339
+ "gemma-2-9b-it-WPO-HB.Q4_K_M.gguf": ["mradermacher/gemma-2-9b-it-WPO-HB-GGUF", MessagesFormatterType.ALPACA],
340
+ "mistral-nemo-bophades-12B.Q4_K_M.gguf": ["mradermacher/mistral-nemo-bophades-12B-GGUF", MessagesFormatterType.MISTRAL],
341
+ "Stella-mistral-nemo-12B.Q4_K_S.gguf": ["mradermacher/Stella-mistral-nemo-12B-GGUF", MessagesFormatterType.MISTRAL],
342
+ "Gemma-2-Ataraxy-9B.Q4_K_M.gguf": ["mradermacher/Gemma-2-Ataraxy-9B-GGUF", MessagesFormatterType.ALPACA],
343
+ "NemoRemix-Magnum_V2_Base-12B.Q4_K_S.gguf": ["mradermacher/NemoRemix-Magnum_V2_Base-12B-GGUF", MessagesFormatterType.MISTRAL],
344
+ "Synatra-7B-v0.3-dpo.Q5_K_M.gguf": ["mradermacher/Synatra-7B-v0.3-dpo-GGUF", MessagesFormatterType.MISTRAL],
345
+ "OpenCrystal-12B-Instruct.Q4_K_M.gguf": ["mradermacher/OpenCrystal-12B-Instruct-GGUF", MessagesFormatterType.MISTRAL],
346
+ "dolphinmaid_l3-1_01sl-q5ks.gguf": ["Dunjeon/DolphinMaid_L3.1_8B-01_GGUF", MessagesFormatterType.LLAMA_3],
347
+ "TypeI-12B.Q4_K_S.gguf": ["mradermacher/TypeI-12B-GGUF", MessagesFormatterType.CHATML],
348
+ "lyralin-12b-v1-q5_k_m.gguf": ["NGalrion/Lyralin-12B-v1-Q5_K_M-GGUF", MessagesFormatterType.CHATML],
349
+ "margnum-12b-v1-q5_k_m.gguf": ["NGalrion/Margnum-12B-v1-Q5_K_M-GGUF", MessagesFormatterType.CHATML],
350
+ "L3-Boshima-a.Q5_K_M.gguf": ["mradermacher/L3-Boshima-a-GGUF", MessagesFormatterType.LLAMA_3],
351
+ "canidori-12b-v1-q5_k_m.gguf": ["NGalrion/Canidori-12B-v1-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
352
+ "MN-12B-Estrella-v1.Q4_K_S.gguf": ["mradermacher/MN-12B-Estrella-v1-GGUF", MessagesFormatterType.CHATML],
353
+ "gemmaomni2-2b-q5_k_m.gguf": ["bunnycore/GemmaOmni2-2B-Q5_K_M-GGUF", MessagesFormatterType.ALPACA],
354
+ "MN-LooseCannon-12B-v1.Q4_K_M.gguf": ["mradermacher/MN-LooseCannon-12B-v1-GGUF", MessagesFormatterType.CHATML],
355
+ "Pleiades-12B-v1.Q4_K_M.gguf": ["mradermacher/Pleiades-12B-v1-GGUF", MessagesFormatterType.CHATML],
356
+ "mistral-nemo-gutenberg-12B.Q4_K_S.gguf": ["mradermacher/mistral-nemo-gutenberg-12B-GGUF", MessagesFormatterType.MISTRAL],
357
+ "gemma2-gutenberg-9B.Q4_K_M.gguf": ["mradermacher/gemma2-gutenberg-9B-GGUF", MessagesFormatterType.ALPACA],
358
+ "NemoDori-v0.5-12B-MN-BT.i1-Q4_K_M.gguf": ["mradermacher/NemoDori-v0.5-12B-MN-BT-i1-GGUF", MessagesFormatterType.MISTRAL],
359
+ "NemoDori-v0.2.1-12B-MN-BT.Q4_K_M.gguf": ["mradermacher/NemoDori-v0.2.1-12B-MN-BT-GGUF", MessagesFormatterType.MISTRAL],
360
+ "NemoDori-v0.2.2-12B-MN-ties.Q4_K_M.gguf": ["mradermacher/NemoDori-v0.2.2-12B-MN-ties-GGUF", MessagesFormatterType.MISTRAL],
361
+ "Mini-Magnum-Unboxed-12B-Q4_K_M.gguf": ["concedo/Mini-Magnum-Unboxed-12B-GGUF", MessagesFormatterType.ALPACA],
362
+ "L3.1-Siithamo-v0.1-8B.i1-Q5_K_M.gguf": ["mradermacher/L3.1-Siithamo-v0.1-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
363
+ "L3.1-Siithamo-v0.2-8B.i1-Q5_K_M.gguf": ["mradermacher/L3.1-Siithamo-v0.2-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
364
+ "Kitsunebi-v1-Gemma2-8k-9B.Q5_K_M.gguf": ["grimjim/Kitsunebi-v1-Gemma2-8k-9B-GGUF", MessagesFormatterType.ALPACA],
365
+ "Llama-3-8B-Stroganoff-3.0.i1-Q4_K_M.gguf": ["mradermacher/Llama-3-8B-Stroganoff-3.0-i1-GGUF", MessagesFormatterType.LLAMA_3],
366
+ "NemoDori-v0.2-12B-MN-BT.i1-Q4_K_M.gguf": ["mradermacher/NemoDori-v0.2-12B-MN-BT-i1-GGUF", MessagesFormatterType.CHATML],
367
+ "NemoDori-v0.1-12B-MS.Q4_K_M.gguf": ["mradermacher/NemoDori-v0.1-12B-MS-GGUF", MessagesFormatterType.CHATML],
368
+ "magnum-12b-v2.i1-Q4_K_M.gguf": ["mradermacher/magnum-12b-v2-i1-GGUF", MessagesFormatterType.CHATML],
369
+ "Alpaca-Llama3.1-8B.Q5_K_M.gguf": ["mradermacher/Alpaca-Llama3.1-8B-GGUF", MessagesFormatterType.CHATML],
370
+ "Orthrus-12b-v0.8.Q4_K_M.gguf": ["mradermacher/Orthrus-12b-v0.8-GGUF", MessagesFormatterType.CHATML],
371
+ "LongWriter-llama3.1-8b-Q5_K_M.gguf": ["bartowski/LongWriter-llama3.1-8b-GGUF", MessagesFormatterType.MISTRAL],
372
+ "L3-bluuwhale-SAO-MIX-8B-V1_fp32-merge-calc.Q5_K_M.gguf": ["mradermacher/L3-bluuwhale-SAO-MIX-8B-V1_fp32-merge-calc-GGUF", MessagesFormatterType.LLAMA_3],
373
+ "YetAnotherMerge-v0.5.Q4_K_M.gguf": ["mradermacher/YetAnotherMerge-v0.5-GGUF", MessagesFormatterType.CHATML],
374
+ "open-hermes-sd-finetune-erot-story.Q5_K_M.gguf": ["mradermacher/open-hermes-sd-finetune-erot-story-GGUF", MessagesFormatterType.CHATML],
375
+ "OntologyHermes-2.5-Mistral-7B.Q6_K.gguf": ["mradermacher/OntologyHermes-2.5-Mistral-7B-GGUF", MessagesFormatterType.MISTRAL],
376
+ "cosmic-2.i1-Q5_K_M.gguf": ["mradermacher/cosmic-2-i1-GGUF", MessagesFormatterType.MISTRAL],
377
+ "L3-Horizon-Anteros-Ara-v0.1-9B.i1-Q4_K_M.gguf": ["mradermacher/L3-Horizon-Anteros-Ara-v0.1-9B-i1-GGUF", MessagesFormatterType.LLAMA_3],
378
+ "Mistral-Nemo-Instruct-2407.i1-Q4_K_M.gguf": ["mradermacher/Mistral-Nemo-Instruct-2407-i1-GGUF", MessagesFormatterType.MISTRAL],
379
+ "Ellaria-9B.i1-Q4_K_M.gguf": ["mradermacher/Ellaria-9B-i1-GGUF", MessagesFormatterType.ALPACA],
380
+ "Apollo-0.4-Llama-3.1-8B.i1-Q5_K_M.gguf": ["mradermacher/Apollo-0.4-Llama-3.1-8B-i1-GGUF", MessagesFormatterType.MISTRAL],
381
+ "NemoRemix-12B.Q4_K_M.gguf": ["mradermacher/NemoRemix-12B-GGUF", MessagesFormatterType.MISTRAL],
382
+ "32K_Selfbot.i1-Q5_K_M.gguf": ["mradermacher/32K_Selfbot-i1-GGUF", MessagesFormatterType.MISTRAL],
383
+ "Viviana_V3.i1-Q5_K_M.gguf": ["mradermacher/Viviana_V3-i1-GGUF", MessagesFormatterType.MISTRAL],
384
+ "dolphin-2.9.4-llama3.1-8b.i1-Q5_K_M.gguf": ["mradermacher/dolphin-2.9.4-llama3.1-8b-i1-GGUF", MessagesFormatterType.CHATML],
385
+ "L3-SAO-MIX-8B-V1.i1-Q5_K_M.gguf": ["mradermacher/L3-SAO-MIX-8B-V1-i1-GGUF", MessagesFormatterType.LLAMA_3],
386
+ "bestofllama3-8b-stock-q5_k_m.gguf": ["bunnycore/BestofLLama3-8B-stock-Q5_K_M-GGUF", MessagesFormatterType.LLAMA_3],
387
+ "L3-Umbral-Mind-RP-v3.0-8B-Q5_K_M.gguf": ["bartowski/L3-Umbral-Mind-RP-v3.0-8B-GGUF", MessagesFormatterType.LLAMA_3],
388
+ "Tess-3-Mistral-Nemo-Q4_K_M.gguf": ["bartowski/Tess-3-Mistral-Nemo-GGUF", MessagesFormatterType.MISTRAL],
389
+ "Llama-3-8B-Stroganoff-2.0.Q5_K_M.gguf": ["RichardErkhov/HiroseKoichi_-_Llama-3-8B-Stroganoff-2.0-gguf", MessagesFormatterType.LLAMA_3],
390
+ "L3-8B-Helium3.Q5_K_M.gguf": ["mradermacher/L3-8B-Helium3-GGUF", MessagesFormatterType.LLAMA_3],
391
+ "MN-12B-Lyra-v1.i1-Q4_K_M.gguf": ["mradermacher/MN-12B-Lyra-v1-i1-GGUF", MessagesFormatterType.CHATML],
392
+ "mahou-1.3-mistral-nemo-12b-q5_k_m.gguf": ["sh1njuku/Mahou-1.3-mistral-nemo-12B-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
393
+ "Humanish-Roleplay-Llama-3.1-8B.i1-Q5_K_M.gguf": ["mradermacher/Humanish-Roleplay-Llama-3.1-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
394
+ "Llama-3-Luminurse-v0.1-OAS-8B.Q5_K_M.gguf": ["grimjim/Llama-3-Luminurse-v0.1-OAS-8B-GGUF", MessagesFormatterType.LLAMA_3],
395
+ "L3.1-8B-Niitama-v1.1-Q5_K_M-imat.gguf": ["L3.1-8B-Niitama-v1.1-Q5_K_M-imat.gguf", MessagesFormatterType.MISTRAL],
396
+ "Evolved-Llama3-8B.i1-Q5_K_M.gguf": ["mradermacher/Evolved-Llama3-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
397
+ "Pantheon-RP-1.5-12b-Nemo.i1-Q4_K_M.gguf": ["mradermacher/Pantheon-RP-1.5-12b-Nemo-i1-GGUF", MessagesFormatterType.CHATML],
398
+ "DarkIdol-Llama-3.1-8B-Instruct-1.2-Uncensored-Q5_K_M.gguf": ["bartowski/DarkIdol-Llama-3.1-8B-Instruct-1.2-Uncensored-GGUF", MessagesFormatterType.LLAMA_3],
399
+ "Llama-3-Swallow-8B-Instruct-v0.1.Q5_K_M.gguf": ["YukiTomita-CC/Llama-3-Swallow-8B-Instruct-v0.1-IMat-GGUF_dolly-15k-ja-prompt", MessagesFormatterType.ALPACA],
400
+ "natsumura-storytelling-rp-1.0-llama-3.1-8B.Q5_K_M.gguf": ["tohur/natsumura-storytelling-rp-1.0-llama-3.1-8b-GGUF", MessagesFormatterType.LLAMA_3],
401
+ "mini-magnum-12b-v1.1.i1-Q4_K_M.gguf": ["mradermacher/mini-magnum-12b-v1.1-i1-GGUF", MessagesFormatterType.MISTRAL],
402
+ "MN-12B-Celeste-V1.9-Q4_K_M.gguf": ["bartowski/MN-12B-Celeste-V1.9-GGUF", MessagesFormatterType.CHATML],
403
+ "Llama-3.1-Techne-RP-8b-v1.i1-Q5_K_M.gguf": ["mradermacher/Llama-3.1-Techne-RP-8b-v1-i1-GGUF", MessagesFormatterType.LLAMA_3],
404
+ "L3-Rhaenys-8B.i1-Q5_K_M.gguf": ["mradermacher/L3-Rhaenys-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
405
+ "Llama-3.1-8b-Uncensored-Dare.i1-Q4_K_M.gguf": ["mradermacher/Llama-3.1-8b-Uncensored-Dare-i1-GGUF", MessagesFormatterType.LLAMA_3],
406
+ "Eros_Scribe-10.7b-v3.Q4_K_M.gguf": ["mradermacher/Eros_Scribe-10.7b-v3-GGUF", MessagesFormatterType.MISTRAL],
407
+ "Gemma2-Nephilim-v3-9B.i1-Q5_K_M.gguf": ["mradermacher/Gemma2-Nephilim-v3-9B-i1-GGUF", MessagesFormatterType.ALPACA],
408
+ "Nemomix-v4.0-12B-Q4_K_M.gguf": ["bartowski/Nemomix-v4.0-12B-GGUF", MessagesFormatterType.CHATML],
409
+ "Nemomix-v0.1-12B-Q4_K_M.gguf": ["bartowski/Nemomix-v0.1-12B-GGUF", MessagesFormatterType.CHATML],
410
+ "Loki-v2.1.i1-Q5_K_M.gguf": ["mradermacher/Loki-v2.1-i1-GGUF", MessagesFormatterType.LLAMA_3],
411
+ "llama3-8B-Special-Dark-RP2.i1-Q5_K_M.gguf": ["mradermacher/llama3-8B-Special-Dark-RP2-i1-GGUF", MessagesFormatterType.LLAMA_3],
412
+ "L3-8B-Celeste-v1-Q5_K_M.gguf": ["bartowski/L3-8B-Celeste-v1-GGUF", MessagesFormatterType.LLAMA_3],
413
+ "L3-8B-Celeste-V1.2-Q5_K_M.gguf": ["bartowski/L3-8B-Celeste-V1.2-GGUF", MessagesFormatterType.LLAMA_3],
414
+ "L3.1-8B-Celeste-V1.5.i1-Q5_K_M.gguf": ["mradermacher/L3.1-8B-Celeste-V1.5-i1-GGUF", MessagesFormatterType.MISTRAL],
415
+ "Celeste-12B-V1.6-Q4_K_M.gguf": ["bartowski/Celeste-12B-V1.6-GGUF", MessagesFormatterType.MISTRAL],
416
+ "L3-SthenoMaidBlackroot-8B-V1-exp5-11-Q4_K_M.gguf": ["DavidAU/L3-SthenoMaidBlackroot-8.9B-V1-BRAINSTORM-5x-GGUF", MessagesFormatterType.LLAMA_3],
417
+ "Llama-3.1-8B-Instruct-Fei-v1-Uncensored.i1-Q5_K_M.gguf": ["mradermacher/Llama-3.1-8B-Instruct-Fei-v1-Uncensored-i1-GGUF", MessagesFormatterType.MISTRAL],
418
+ "IceCoffeeRP-7b.i1-Q5_K_M.gguf": ["mradermacher/IceCoffeeRP-7b-i1-GGUF", MessagesFormatterType.ALPACA],
419
+ "lumi-nemo-e2.0.Q4_K_M.gguf": ["mradermacher/lumi-nemo-e2.0-GGUF", MessagesFormatterType.MISTRAL],
420
+ "Lumimaid-v0.2-8B.i1-Q5_K_M.gguf": ["mradermacher/Lumimaid-v0.2-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
421
+ "Lumimaid-v0.2-12B.i1-Q4_K_M.gguf": ["mradermacher/Lumimaid-v0.2-12B-i1-GGUF", MessagesFormatterType.LLAMA_3],
422
+ "Llama-3.1-8B-Instruct-abliterated_via_adapter.Q5_K_M.gguf": ["grimjim/Llama-3.1-8B-Instruct-abliterated_via_adapter-GGUF", MessagesFormatterType.LLAMA_3],
423
+ "Llama-Nephilim-Metamorphosis-v1-8B.Q5_K_M.gguf": ["grimjim/Llama-Nephilim-Metamorphosis-v1-8B-GGUF", MessagesFormatterType.LLAMA_3],
424
+ "Meta-Llama-3.1-8B-Instruct-abliterated.i1-Q5_K_M.gguf": ["mradermacher/Meta-Llama-3.1-8B-Instruct-abliterated-i1-GGUF", MessagesFormatterType.LLAMA_3],
425
+ "pstella-16b.Q5_K_M.gguf": ["mradermacher/pstella-16b-GGUF", MessagesFormatterType.LLAMA_3],
426
+ "DarkIdol-Llama-3.1-8B-Instruct-1.1-Uncensored.i1-Q5_K_M.gguf": ["mradermacher/DarkIdol-Llama-3.1-8B-Instruct-1.1-Uncensored-i1-GGUF", MessagesFormatterType.LLAMA_3],
427
+ "Mistral-Nemo-Instruct-2407-Q4_K_M.gguf": ["bartowski/Mistral-Nemo-Instruct-2407-GGUF", MessagesFormatterType.MISTRAL],
428
+ "ghost-8b-beta.q5_k.gguf": ["ZeroWw/ghost-8b-beta-GGUF", MessagesFormatterType.MISTRAL],
429
+ "Honey-Yuzu-13B.Q4_K_M.gguf": ["backyardai/Honey-Yuzu-13B-GGUF", MessagesFormatterType.MISTRAL],
430
+ "llama3-8B-DarkIdol-2.3-Uncensored-32K.i1-Q5_K_M.gguf": ["mradermacher/llama3-8B-DarkIdol-2.3-Uncensored-32K-i1-GGUF", MessagesFormatterType.LLAMA_3],
431
+ "LLaMa-3-Instruct-SmallPrefMix-ORPO-8B.i1-Q5_K_M.gguf": ["mradermacher/LLaMa-3-Instruct-SmallPrefMix-ORPO-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
432
+ "NeuralLemon.Q5_K_M.gguf": ["backyardai/NeuralLemon-GGUF", MessagesFormatterType.MISTRAL],
433
+ "Llama-3-Intermix.i1-Q5_K_M.gguf": ["mradermacher/Llama-3-Intermix-i1-GGUF", MessagesFormatterType.LLAMA_3],
434
+ "C3TR-Adapter-Q4_k_m.gguf": ["webbigdata/C3TR-Adapter_gguf", MessagesFormatterType.ALPACA],
435
+ "Llama-3-8B-Magpie-Mix-RC-UltraDPO-08-3.Q5_K_M.gguf": ["mradermacher/Llama-3-8B-Magpie-Mix-RC-UltraDPO-08-3-GGUF", MessagesFormatterType.LLAMA_3],
436
+ "Tiger-Gemma-9B-v2.Q4_K_M.gguf": ["QuantFactory/Tiger-Gemma-9B-v2-GGUF", MessagesFormatterType.ALPACA],
437
+ "gemma-2-9b-it-SimPO.i1-Q4_K_M.gguf": ["mradermacher/gemma-2-9b-it-SimPO-i1-GGUF", MessagesFormatterType.ALPACA],
438
+ "Gemma-2-9B-It-SPPO-Iter3.Q4_K_M.iMatrix.gguf": ["MCZK/Gemma-2-9B-It-SPPO-Iter3-GGUF", MessagesFormatterType.ALPACA],
439
+ "Llama-3-NeuralPaca-8b.Q4_K_M.gguf": ["RichardErkhov/NeuralNovel_-_Llama-3-NeuralPaca-8b-gguf", MessagesFormatterType.ALPACA],
440
+ "SaoRPM-2x8B.i1-Q4_K_M.gguf": ["mradermacher/SaoRPM-2x8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
441
+ "L3-Hecate-8B-v1.2.Q4_K_M.gguf": ["mradermacher/L3-Hecate-8B-v1.2-GGUF", MessagesFormatterType.LLAMA_3],
442
+ "Mahou-1.3b-llama3-8B.i1-Q4_K_M.gguf": ["mradermacher/Mahou-1.3b-llama3-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
443
+ "SwallowMaid-8B-L3-SPPO-abliterated.i1-Q5_K_M.gguf": ["mradermacher/SwallowMaid-8B-L3-SPPO-abliterated-i1-GGUF", MessagesFormatterType.LLAMA_3],
444
+ "L3-8B-Lunar-Stheno.i1-Q5_K_M.gguf": ["mradermacher/L3-8B-Lunar-Stheno-i1-GGUF", MessagesFormatterType.LLAMA_3],
445
+ "llama3_Loradent.Q4_K_M.gguf": ["mradermacher/llama3_Loradent-GGUF", MessagesFormatterType.LLAMA_3],
446
+ "Llama-3-8B-Stroganoff.i1-Q4_K_M.gguf": ["mradermacher/Llama-3-8B-Stroganoff-i1-GGUF", MessagesFormatterType.LLAMA_3],
447
+ "L3-8B-EnchantedForest-v0.5.i1-Q4_K_M.gguf": ["mradermacher/L3-8B-EnchantedForest-v0.5-i1-GGUF", MessagesFormatterType.LLAMA_3],
448
+ "gemma-radiation-rp-9b-q5_k_m.gguf": ["pegasus912/Gemma-Radiation-RP-9B-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
449
+ "Magic-Dolphin-7b.Q4_K_M.gguf": ["mradermacher/Magic-Dolphin-7b-GGUF", MessagesFormatterType.MISTRAL],
450
+ "mathstral-7B-v0.1-Q5_K_M.gguf": ["bartowski/mathstral-7B-v0.1-GGUF", MessagesFormatterType.MISTRAL],
451
+ "Gemma2-9B-it-Boku-v1.Q5_K_M.gguf": ["mradermacher/Gemma2-9B-it-Boku-v1-GGUF", MessagesFormatterType.MISTRAL],
452
+ "Gemma-2-9B-It-SPPO-Iter3-Q5_K_M.gguf": ["grapevine-AI/Gemma-2-9B-It-SPPO-Iter3-GGUF", MessagesFormatterType.MISTRAL],
453
+ "L3-8B-Niitama-v1.i1-Q4_K_M.gguf": ["mradermacher/L3-8B-Niitama-v1-i1-GGUF", MessagesFormatterType.LLAMA_3],
454
+ "Maidphin-Kunoichi-7B.Q5_K_M.gguf": ["RichardErkhov/nbeerbower_-_Maidphin-Kunoichi-7B-gguf", MessagesFormatterType.MISTRAL],
455
+ "L3-15B-EtherealMaid-t0.0001.i1-Q4_K_M.gguf": ["mradermacher/L3-15B-EtherealMaid-t0.0001-i1-GGUF", MessagesFormatterType.LLAMA_3],
456
+ "L3-15B-MythicalMaid-t0.0001.i1-Q4_K_M.gguf": ["mradermacher/L3-15B-MythicalMaid-t0.0001-i1-GGUF", MessagesFormatterType.LLAMA_3],
457
+ "llama-3-Nephilim-v3-8B.Q5_K_M.gguf": ["grimjim/llama-3-Nephilim-v3-8B-GGUF", MessagesFormatterType.LLAMA_3],
458
+ "NarutoDolphin-10B.Q5_K_M.gguf": ["RichardErkhov/FelixChao_-_NarutoDolphin-10B-gguf", MessagesFormatterType.MISTRAL],
459
+ "l3-8b-tamamo-v1-q8_0.gguf": ["Ransss/L3-8B-Tamamo-v1-Q8_0-GGUF", MessagesFormatterType.LLAMA_3],
460
+ "Tiger-Gemma-9B-v1-Q4_K_M.gguf": ["bartowski/Tiger-Gemma-9B-v1-GGUF", MessagesFormatterType.LLAMA_3],
461
+ "TooManyMixRolePlay-7B-Story_V3.5.Q4_K_M.gguf": ["mradermacher/TooManyMixRolePlay-7B-Story_V3.5-GGUF", MessagesFormatterType.LLAMA_3],
462
+ "natsumura-llama3-v1.1-8b.Q4_K_M.gguf": ["mradermacher/natsumura-llama3-v1.1-8b-GGUF", MessagesFormatterType.LLAMA_3],
463
+ "natsumura-llama3-v1-8b.i1-Q4_K_M.gguf": ["mradermacher/natsumura-llama3-v1-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
464
+ "nephra_v1.0.Q5_K_M.gguf": ["PrunaAI/yodayo-ai-nephra_v1.0-GGUF-smashed", MessagesFormatterType.LLAMA_3],
465
+ "DPO-ONLY-Zephyr-7B.Q6_K.gguf": ["mradermacher/DPO-ONLY-Zephyr-7B-GGUF", MessagesFormatterType.LLAMA_3],
466
+ "L3-Deluxe-Scrambled-Eggs-On-Toast-8B.Q8_0.gguf": ["mradermacher/L3-Deluxe-Scrambled-Eggs-On-Toast-8B-GGUF", MessagesFormatterType.LLAMA_3],
467
+ "L3-Scrambled-Eggs-On-Toast-8B.i1-Q6_K.gguf": ["mradermacher/L3-Scrambled-Eggs-On-Toast-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
468
+ "Llama-3-uncensored-Dare-1.Q4_K_M.gguf": ["mradermacher/Llama-3-uncensored-Dare-1-GGUF", MessagesFormatterType.LLAMA_3],
469
+ "llama3-8B-DarkIdol-2.2-Uncensored-1048K.i1-Q6_K.gguf": ["mradermacher/llama3-8B-DarkIdol-2.2-Uncensored-1048K-i1-GGUF", MessagesFormatterType.LLAMA_3],
470
+ "dolphin-2.9.3-mistral-7b-32k-q4_k_m.gguf": ["huggingkot/dolphin-2.9.3-mistral-7B-32k-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
471
+ "dolphin-2.9.3-mistral-7B-32k-Q5_K_M.gguf": ["bartowski/dolphin-2.9.3-mistral-7B-32k-GGUF", MessagesFormatterType.MISTRAL],
472
+ "Lexi-Llama-3-8B-Uncensored_Q5_K_M.gguf": ["Orenguteng/Llama-3-8B-Lexi-Uncensored-GGUF", MessagesFormatterType.LLAMA_3],
473
+ "Llama3-Sophie.Q8_0.gguf": ["mradermacher/Llama3-Sophie-GGUF", MessagesFormatterType.LLAMA_3],
474
+ "Aura-Uncensored-OAS-8B-L3.i1-Q4_K_M.gguf": ["mradermacher/Aura-Uncensored-OAS-8B-L3-i1-GGUF", MessagesFormatterType.LLAMA_3],
475
+ "L3-Uncen-Merger-Omelette-RP-v0.2-8B-Q5_K_S-imat.gguf": ["LWDCLS/L3-Uncen-Merger-Omelette-RP-v0.2-8B-GGUF-IQ-Imatrix-Request", MessagesFormatterType.LLAMA_3],
476
+ "qwen2-diffusion-prompter-v01-q6_k.gguf": ["trollek/Qwen2-0.5B-DiffusionPrompter-v0.1-GGUF", MessagesFormatterType.LLAMA_3],
477
+ "Smegmma-Deluxe-9B-v1-Q6_K.gguf": ["bartowski/Smegmma-Deluxe-9B-v1-GGUF", MessagesFormatterType.MISTRAL],
478
+ "Mahou-1.3c-mistral-7B.i1-Q6_K.gguf": ["mradermacher/Mahou-1.3c-mistral-7B-i1-GGUF", MessagesFormatterType.MISTRAL],
479
+ "Silicon-Maid-7B-Q8_0_X.gguf": ["duyntnet/Silicon-Maid-7B-imatrix-GGUF", MessagesFormatterType.ALPACA],
480
+ "l3-umbral-mind-rp-v3.0-8b-q5_k_m-imat.gguf": ["Casual-Autopsy/L3-Umbral-Mind-RP-v3.0-8B-Q5_K_M-GGUF", MessagesFormatterType.LLAMA_3],
481
+ "Meta-Llama-3.1-8B-Claude-iMat-Q5_K_M.gguf": ["InferenceIllusionist/Meta-Llama-3.1-8B-Claude-iMat-GGUF", MessagesFormatterType.LLAMA_3],
482
+ "Phi-3.1-mini-128k-instruct-Q6_K_L.gguf": ["bartowski/Phi-3.1-mini-128k-instruct-GGUF", MessagesFormatterType.PHI_3],
483
+ "tifa-7b-qwen2-v0.1.q4_k_m.gguf": ["Tifa-RP/Tifa-7B-Qwen2-v0.1-GGUF", MessagesFormatterType.OPEN_CHAT],
484
+ "Oumuamua-7b-RP_Q5_K_M.gguf": ["Aratako/Oumuamua-7b-RP-GGUF", MessagesFormatterType.MISTRAL],
485
+ "Japanese-TextGen-Kage-v0.1.2-2x7B-NSFW_iMat_Ch200_IQ4_XS.gguf": ["dddump/Japanese-TextGen-Kage-v0.1.2-2x7B-NSFW-gguf", MessagesFormatterType.VICUNA],
486
+ "ChatWaifu_v1.2.1.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.2.1-GGUF", MessagesFormatterType.MISTRAL],
487
+ "ChatWaifu_v1.1.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.1-GGUF", MessagesFormatterType.MISTRAL],
488
+ "Ninja-V2-7B_Q4_K_M.gguf": ["Local-Novel-LLM-project/Ninja-V2-7B-GGUF", MessagesFormatterType.VICUNA],
489
+ "Yamase-12B.Q4_K_M.gguf": ["mradermacher/Yamase-12B-GGUF", MessagesFormatterType.MISTRAL],
490
+ "borea-phi-3.5-mini-instruct-common.Q5_K_M.gguf": ["keitokei1994/Borea-Phi-3.5-mini-Instruct-Common-GGUF", MessagesFormatterType.PHI_3],
491
+ "Llama-3-Nymeria-ELYZA-8B.i1-Q4_K_M.gguf": ["mradermacher/Llama-3-Nymeria-ELYZA-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
492
+ "suzume-llama-3-8B-japanese.Q4_K_M.gguf": ["PrunaAI/lightblue-suzume-llama-3-8B-japanese-GGUF-smashed", MessagesFormatterType.LLAMA_3],
493
+ "suzume-llama-3-8B-multilingual-orpo-borda-top25.Q4_K_M.gguf": ["RichardErkhov/lightblue_-_suzume-llama-3-8B-multilingual-orpo-borda-top25-gguf", MessagesFormatterType.LLAMA_3],
494
+ "Bungo-L3-8B.Q5_K_M.gguf": ["backyardai/Bungo-L3-8B-GGUF", MessagesFormatterType.LLAMA_3],
495
+ "ezo-common-t2-2b-gemma-2-it.Q6_K.gguf": ["keitokei1994/EZO-Common-T2-2B-gemma-2-it-GGUF", MessagesFormatterType.ALPACA],
496
+ "Llama-3-EZO-8b-Common-it.Q5_K_M.iMatrix.gguf": ["MCZK/Llama-3-EZO-8b-Common-it-GGUF", MessagesFormatterType.MISTRAL],
497
+ "EZO-Common-9B-gemma-2-it.i1-Q4_K_M.gguf": ["mradermacher/EZO-Common-9B-gemma-2-it-i1-GGUF", MessagesFormatterType.MISTRAL],
498
+ }
499
# Mapping from human-readable prompt-format names (shown in the UI dropdown)
# to llama-cpp-agent MessagesFormatterType values.
# NOTE(review): "Gemma 2" maps to ALPACA and "Qwen2" to OPEN_CHAT — presumably
# the closest available templates in llama-cpp-agent; confirm upstream.
llm_formats = {
    "MISTRAL": MessagesFormatterType.MISTRAL,
    "CHATML": MessagesFormatterType.CHATML,
    "VICUNA": MessagesFormatterType.VICUNA,
    "LLAMA 2": MessagesFormatterType.LLAMA_2,
    "SYNTHIA": MessagesFormatterType.SYNTHIA,
    "NEURAL CHAT": MessagesFormatterType.NEURAL_CHAT,
    "SOLAR": MessagesFormatterType.SOLAR,
    "OPEN CHAT": MessagesFormatterType.OPEN_CHAT,
    "ALPACA": MessagesFormatterType.ALPACA,
    "CODE DS": MessagesFormatterType.CODE_DS,
    "B22": MessagesFormatterType.B22,
    "LLAMA 3": MessagesFormatterType.LLAMA_3,
    "PHI 3": MessagesFormatterType.PHI_3,
    "Autocoder": MessagesFormatterType.AUTOCODER,
    "DeepSeek Coder v2": MessagesFormatterType.DEEP_SEEK_CODER_2,
    "Gemma 2": MessagesFormatterType.ALPACA,
    "Qwen2": MessagesFormatterType.OPEN_CHAT,
}
518
# https://github.com/Maximilian-Winter/llama-cpp-agent
# Output languages selectable in the prompt-generation UI.
llm_languages = ["English", "Japanese", "Chinese", "Korean", "Spanish", "Portuguese", "German", "French", "Finnish", "Russian"]
# (label, value) tuples for the model dropdown; rebuilt by update_llm_model_tupled_list().
llm_models_tupled_list = []
# The first key of llm_models serves as the fallback/default model file.
default_llm_model_filename = list(llm_models.keys())[0]
# When set (via select_dolphin_format), overrides the per-model prompt format.
override_llm_format = None
523
+
524
+
525
def to_list(s):
    """Split a comma-separated string into a list of non-empty, stripped items.

    Fix: the original filter tested the whole input (`if not s == ""`), a
    loop-invariant condition, so empty fragments from inputs like "a,,b"
    leaked through. Filter each element instead.
    """
    return [x.strip() for x in s.split(",") if x.strip() != ""]
527
+
528
+
529
def list_uniq(l):
    """Return *l* with duplicates removed, preserving first-occurrence order.

    Uses dict.fromkeys (O(n)) instead of sorted(set(l), key=l.index), which
    was O(n^2) because list.index is a linear scan per element. Elements must
    be hashable, as they already had to be for set().
    """
    return list(dict.fromkeys(l))
531
+
532
+
533
@wrapt_timeout_decorator.timeout(dec_timeout=3.5)
def to_list_ja(s):
    """Split a (possibly Japanese) comma-separated string into non-empty, stripped items.

    Japanese commas/periods (、。) are normalized to ',' first. Guarded by a
    3.5 s timeout because it runs on arbitrary user input.

    Fix: same bug as to_list — the filter tested the whole input `s` instead
    of each element `x`, letting empty fragments through.
    """
    import re
    s = re.sub(r'[、。]', ',', s)
    return [x.strip() for x in s.split(",") if x.strip() != ""]
538
+
539
+
540
def is_japanese(s):
    """Heuristically report whether *s* contains Japanese (or CJK) characters."""
    import unicodedata
    jp_markers = ("CJK UNIFIED", "HIRAGANA", "KATAKANA")
    return any(
        marker in unicodedata.name(ch, "")
        for ch in s
        for marker in jp_markers
    )
547
+
548
+
549
def update_llm_model_tupled_list():
    """Rebuild and return the global (label, value) model list.

    Combines every registered model in llm_models with any .gguf files
    already present in llm_models_dir, de-duplicated in first-seen order.
    """
    from pathlib import Path
    global llm_models_tupled_list
    entries = [(key, key) for key in llm_models.keys()]
    entries.extend((path.name, path.name) for path in Path(llm_models_dir).glob('*.gguf'))
    llm_models_tupled_list = list_uniq(entries)
    return llm_models_tupled_list
564
+
565
+
566
def download_llm_models():
    """Download every registered model file, rebuilding the global tupled list.

    Models that fail to download (network error, missing file, ...) are
    silently skipped so one bad entry cannot block the rest.
    """
    from huggingface_hub import hf_hub_download
    global llm_models_tupled_list
    llm_models_tupled_list = []
    for model_file, (repo_id, _fmt) in llm_models.items():
        try:
            hf_hub_download(repo_id=repo_id, filename=model_file, local_dir=llm_models_dir)
        except Exception:
            continue
        llm_models_tupled_list.append((model_file, model_file))
578
+
579
+
580
def download_llm_model(filename):
    """Download one registered model file and return its name.

    Falls back to default_llm_model_filename when *filename* is unknown or
    the download fails; refreshes the global tupled list on success.
    """
    from huggingface_hub import hf_hub_download
    if filename not in llm_models.keys():
        return default_llm_model_filename
    try:
        repo_id = llm_models[filename][0]
        hf_hub_download(repo_id=repo_id, filename=filename, local_dir=llm_models_dir)
    except Exception as e:
        print(e)
        return default_llm_model_filename
    update_llm_model_tupled_list()
    return filename
590
+
591
+
592
def get_dolphin_model_info(filename):
    """Return a markdown link to the model's HF repo, or "None" if unknown."""
    entry = llm_models.get(filename, None)
    if not entry:
        return "None"
    return f'Repo: [{entry[0]}](https://huggingface.co/{entry[0]})'
598
+
599
+
600
def select_dolphin_model(filename, progress=gr.Progress(track_tqdm=True)):
    """Gradio handler: download/select a model and refresh the related widgets.

    Clears any manual prompt-format override, then returns updates for the
    model dropdown, the format dropdown, and the repo-info markdown.
    """
    global override_llm_format
    override_llm_format = None
    progress(0, desc="Loading model...")
    selected = download_llm_model(filename)
    progress(1, desc="Model loaded.")
    info_md = get_dolphin_model_info(filename)
    return (
        gr.update(value=selected, choices=get_dolphin_models()),
        gr.update(value=get_dolphin_model_format(selected)),
        gr.update(value=info_md),
    )
608
+
609
+
610
def select_dolphin_format(format_name):
    """Gradio handler: force a specific prompt format, overriding the model default."""
    global override_llm_format
    override_llm_format = llm_formats[format_name]
    return gr.update(value=format_name)
614
+
615
+
616
+ download_llm_model(default_llm_model_filename)
617
+
618
+
619
def get_dolphin_models():
    """Return the refreshed (label, value) choices for the model dropdown."""
    choices = update_llm_model_tupled_list()
    return choices
621
+
622
+
623
def get_llm_formats():
    """Return the prompt-format names available in the UI."""
    return [*llm_formats]
625
+
626
+
627
def get_key_from_value(d, val):
    """Return the first key in *d* whose value equals *val*, or None if absent."""
    return next((key for key, value in d.items() if value == val), None)
632
+
633
+
634
def get_dolphin_model_format(filename):
    """Return the UI name of the prompt format registered for *filename*.

    Unknown filenames fall back to the default model's format.
    """
    if filename not in llm_models.keys():
        filename = default_llm_model_filename
    fmt = llm_models[filename][1]  # avoid shadowing the builtin `format`
    return get_key_from_value(llm_formats, fmt)
639
+
640
+
641
def add_dolphin_models(query, format_name):
    """Register model(s) from a Hugging Face repo (or repo + filename) in llm_models.

    query: "user/repo", "user/repo/.../file.gguf", or a full huggingface.co URL.
    format_name: key into llm_formats; used as the prompt format of new entries.
    Returns a gr.update for the model dropdown selecting the newest entry; on
    any failure returns gr.update(visible=True) and leaves llm_models untouched.
    """
    import re
    from huggingface_hub import HfApi
    global llm_models
    api = HfApi()
    add_models = {}
    fmt = llm_formats[format_name]
    try:
        # Capture "user/repo" and, if present, a trailing *.gguf filename.
        # Dots are escaped so ".gguf"/"huggingface.co" match literally.
        s = list(re.findall(r'^(?:https?://huggingface\.co/)?(.+?/.+?)(?:/.*/(.+?\.gguf).*?)?$', query)[0])
        if s and "" in s: s.remove("")
        if len(s) == 1:
            repo = s[0]
            if not api.repo_exists(repo_id=repo): return gr.update(visible=True)
            for file in api.list_repo_files(repo_id=repo):
                # BUGFIX: key each .gguf by its own repo-relative path. The
                # original used the still-empty `filename` variable here, so
                # every discovered file collapsed into a single "" entry.
                if str(file).endswith(".gguf"):
                    add_models[str(file)] = [repo, fmt]
        elif len(s) >= 2:
            repo, filename = s[0], s[1]
            if not api.repo_exists(repo_id=repo) or not api.file_exists(repo_id=repo, filename=filename):
                return gr.update(visible=True)
            add_models[filename] = [repo, fmt]
        else:
            return gr.update(visible=True)
    except Exception as e:
        print(e)
        return gr.update(visible=True)
    #print(add_models)
    llm_models = (llm_models | add_models).copy()
    update_llm_model_tupled_list()
    choices = get_dolphin_models()
    return gr.update(choices=choices, value=choices[-1][1])
673
+
674
+
675
# Runtime state for the prompt-generation chat: selected output language and
# which system-prompt variant from dolphin_system_prompt is active.
dolphin_output_language = "English"
dolphin_sysprompt_mode = "Default"
677
+ dolphin_system_prompt = {"Default": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
678
+ The message must have the following [Tags] generated in strict accordance with the following [Rules]:
679
+ ```
680
+ [Tags]
681
+ - Words to describe full names of characters and names of series in which they appear.
682
+ - Words to describe names of the people there and their numbers, such as 2girls, 1boy.
683
+ - Words to describe their hair color, hairstyle, hair length, hair accessory, eye color, eye shape, facial expression, breast size, and clothing of them in detail, such as long hair.
684
+ - Words to describe their external features, ornaments and belongings (also specify colors, patterns, shapes) in detail.
685
+ - Words to describe their stance from head to toe in detail.
686
+ - Words to describe their acting, especially with sexual activity in detail.
687
+ - Words to describe their surroundings in detail.
688
+ - Words to describe background details, such as inside room, forest, starry sky.
689
+ [Rules]
690
+ - Any output should be plain text in English and don't use line breaks.
691
+ - Output only composed of Tags in 1 line, separated by commas with spaces between Tags, in lower case English.
692
+ - Output should be in the format: "//GENBEGIN//://1girl, Tag, Tag, ..., Tag//://GENEND//".
693
+ - Preferably refer to and describe the information obtained from Danbooru. If not, describe it in own way.
694
+ - It's preferable that each Tag is a plain phrase, word, caption, Danbooru tag, or E621 tag.
695
+ - Convert any nicknames to full names first.
696
+ - If a sexual theme is given, priority should be given to specific and rich descriptions of sexual activity, especially about genitals, fluids.
697
+ - Assemble a short story internally which is developed from the themes provided, then describe a scene into an detailed English sentences based on the central character internally.
698
+ - Split sentences into short phrases or words, and then convert them to Tags.
699
+ - Use associated Danbooru tags, E621 tags.
700
+ - Same Tags should be used only once per output.
701
+ - Anyway, keep processing until you've finished outputting a message.
702
+ ```
703
+ Based on these Rules, please tell me a message within 40 Tags that can generate an image for the following themes:''',
704
+ "Strictly on themes": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
705
+ The message must have the following [Tags] generated in strict accordance with the following [Rules]:
706
+ ```
707
+ [Tags]
708
+ - Words to describe full names of characters and names of series in which they appear.
709
+ - Words to describe names of the people there and their numbers, such as 2girls, 1boy.
710
+ - Words to describe their hair color, hairstyle, hair length, hair accessory, eye color, eye shape, facial expression, breast size, and clothing of them in detail, such as long hair.
711
+ - Words to describe their external features, ornaments and belongings (also specify colors, patterns, shapes) in detail.
712
+ - Words to describe their stance from head to toe in detail.
713
+ - Words to describe their acting, especially with sexual activity in detail.
714
+ - Words to describe their surroundings in detail.
715
+ - Words to describe background details, such as inside room, forest, starry sky.
716
+ [Rules]
717
+ - Any output should be plain text in English and don't use line breaks.
718
+ - Output only composed of Tags in 1 line, separated by commas with spaces between Tags, in lower case English.
719
+ - Output should be in the format: "//GENBEGIN//://1girl, Tag, Tag, ..., Tag//://GENEND//".
720
+ - Preferably refer to and describe the information obtained from Danbooru. If not, describe it in own way.
721
+ - It's preferable that each Tag is a plain phrase, word, caption, Danbooru tag, or E621 tag.
722
+ - Convert any nicknames to full names first.
723
+ - If a sexual theme is given, priority should be given to specific and rich descriptions of sexual activity, especially about genitals, fluids.
724
+ - Rewrite the given themes in plain English without changing the main idea.
725
+ - Split sentences into short phrases or words, and then convert them to Tags.
726
+ - Use associated Danbooru tags, E621 tags.
727
+ - Same Tags should be used only once per output.
728
+ - Anyway, keep processing until you've finished outputting a message.
729
+ ```
730
+ Based on these Rules, please tell me a message within 40 Tags that can generate an image for the following themes:''',
731
+ "With description": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
732
+ The message must have the following [Tags] generated in strict accordance with the following [Rules]:
733
+ ```
734
+ [Tags]
735
+ - Words to describe full names of characters and names of series in which they appear.
736
+ - Words to describe names of the people there and their numbers, such as 2girls, 1boy.
737
+ - Words to describe their hair color, hairstyle, hair length, hair accessory, eye color, eye shape, facial expression, breast size, and clothing of them in detail, such as long hair.
738
+ - Words to describe their external features, ornaments and belongings (also specify colors, patterns, shapes) in detail.
739
+ - Words to describe their stance from head to toe in detail.
740
+ - Words to describe their acting, especially with sexual activity in detail.
741
+ - Words to describe their surroundings in detail.
742
+ - Words to describe background details, such as inside room, forest, starry sky.
743
+ [Rules]
744
+ - Any Tags should be plain text in English and don't use line breaks.
745
+ - Message is only composed of Tags in 1 line, separated by commas with spaces between Tags, in lower case English.
746
+ - Message should be in the format: "//GENBEGIN//://1girl, Tag, Tag, ..., Tag//://GENEND//".
747
+ - Preferably refer to and describe the information obtained from Danbooru. If not, describe it in own way.
748
+ - It's preferable that each Tag is a plain phrase, word, caption, Danbooru tag, or E621 tag.
749
+ - Convert any nicknames to full names first.
750
+ - If a sexual theme is given, priority should be given to specific and rich descriptions of sexual activity, especially about genitals, fluids.
751
+ - Assemble a short story internally which is developed from the themes provided, then describe a scene into an detailed English sentences based on the central character internally.
752
+ - Split sentences into short phrases or words, and then convert them to Tags.
753
+ - Use associated Danbooru tags, E621 tags.
754
+ - Same Tags should be used only once per output.
755
+ - Anyway, keep processing until you've finished outputting a message.
756
+ ```
757
+ Based on these Rules, please tell me a message within 40 Tags that can generate an image for the following themes,
758
+ then describe the message you've generated in short, in <LANGUAGE>.:''',
759
+ "With dialogue and description": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
760
+ The message must have the following [Tags] generated in strict accordance with the following [Rules]:
761
+ ```
762
+ [Tags]
763
+ - Words to describe full names of characters and names of series in which they appear.
764
+ - Words to describe names of the people there and their numbers, such as 2girls, 1boy.
765
+ - Words to describe their hair color, hairstyle, hair length, hair accessory, eye color, eye shape, facial expression, breast size, and clothing of them in detail, such as long hair.
766
+ - Words to describe their external features, ornaments and belongings (also specify colors, patterns, shapes) in detail.
767
+ - Words to describe their stance from head to toe in detail.
768
+ - Words to describe their acting, especially with sexual activity in detail.
769
+ - Words to describe their surroundings in detail.
770
+ - Words to describe background details, such as inside room, forest, starry sky.
771
+ [Rules]
772
+ - Any Tags should be plain text in English and don't use line breaks.
773
+ - Message is only composed of Tags in 1 line, separated by commas with spaces between Tags, in lower case English.
774
+ - Message should be in the format: "//GENBEGIN//://1girl, Tag, Tag, ..., Tag//://GENEND//".
775
+ - Preferably refer to and describe the information obtained from Danbooru. If not, describe it in own way.
776
+ - It's preferable that each Tag is a plain phrase, word, caption, Danbooru tag, or E621 tag.
777
+ - Convert any nicknames to full names first.
778
+ - If a sexual theme is given, priority should be given to specific and rich descriptions of sexual activity, especially about genitals, fluids.
779
+ - Assemble a short story internally which is developed from the themes provided, then describe a scene into an detailed English sentences based on the central character internally.
780
+ - Split sentences into short phrases or words, and then convert them to Tags.
781
+ - Use associated Danbooru tags, E621 tags.
782
+ - Same Tags should be used only once per output.
783
+ - Anyway, keep processing until you've finished outputting a message.
784
+ ```
785
+ Based on these Rules, please tell me message within 40 Tags that can generate an image for the following themes,
786
+ then write the character's long actor's line composed of one's voices and moaning and voices in thought, based on the story you have assembled, in <LANGUAGE>,
787
+ enclosed in //VOICEBEGIN//:// and //://VOICEEND//, then describe the message you've generated in short, in <LANGUAGE>.:''',
788
+ "Longer prompt": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
789
+ The message must have the following [Tags] generated in strict accordance with the following [Rules]:
790
+ ```
791
+ [Tags]
792
+ - Words to describe full names of characters and names of series in which they appear.
793
+ - Words to describe names of the people there and their numbers, such as 2girls, 1boy.
794
+ - Words to describe their hair color, hairstyle, hair length, hair accessory, eye color, eye shape, facial expression, breast size, and clothing of them in detail.
795
+ - Words to describe their external features, ornaments and belongings (also specify colors, patterns, shapes) in detail.
796
+ - Words to describe their stance from head to toe in detail.
797
+ - Words to describe their acting, especially with sexual activity in detail.
798
+ - Words to describe their surroundings in detail.
799
+ - Words to describe background details.
800
+ [Rules]
801
+ - Any Tags should be plain text in English and don't use line breaks.
802
+ - Message is only composed of Tags in 1 line, separated by commas with spaces between Tags, in lower case English.
803
+ - Message should be enclosed in //GENBEGIN//:// and //://GENEND//.
804
+ - Convert any nicknames to full names first.
805
+ - If a sexual theme is given, priority should be given to specific and rich descriptions of sexual activity, especially about genitals, fluids.
806
+ - Assemble a short story internally which is developed from the themes provided, then describe a scene into detailed English text based on the central character internally.
807
+ - Tags can be in the form of sentences.
808
+ - You can also use Danbooru tags, E621 tags as Tags.
809
+ - Anyway, keep processing until you've finished outputting a message.
810
+ ```
811
+ Based on these Rules, please tell me a message within 40 Tags that can generate an image for the following themes:''',
812
+ "Longer prompt strictly on themes": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
813
+ The message must have the following [Tags] generated in strict accordance with the following [Rules]:
814
+ ```
815
+ [Tags]
816
+ - Words to describe full names of characters and names of series in which they appear.
817
+ - Words to describe names of the people there and their numbers, such as 2girls, 1boy.
818
+ - Words to describe their hair color, hairstyle, hair length, hair accessory, eye color, eye shape, facial expression, breast size, and clothing of them in detail.
819
+ - Words to describe their external features, ornaments and belongings (also specify colors, patterns, shapes) in detail.
820
+ - Words to describe their stance from head to toe in detail.
821
+ - Words to describe their acting, especially with sexual activity in detail.
822
+ - Words to describe their surroundings in detail.
823
+ - Words to describe background details.
824
+ [Rules]
825
+ - Any Tags should be plain text in English and don't use line breaks.
826
+ - Message is only composed of Tags in 1 line, separated by commas with spaces between Tags, in lower case English.
827
+ - Message should be enclosed in //GENBEGIN//:// and //://GENEND//.
828
+ - Convert any nicknames to full names first.
829
+ - If a sexual theme is given, priority should be given to specific and rich descriptions of sexual activity, especially about genitals, fluids.
830
+ - Rewrite the given themes in plain English without changing the main idea.
831
+ - Tags can be in the form of sentences.
832
+ - You can also use Danbooru tags, E621 tags as Tags.
833
+ - Anyway, keep processing until you've finished outputting a message.
834
+ ```
835
+ Based on these Rules, please tell me a message within 40 Tags that can generate an image for the following themes:''',
836
+ "Longer prompt with description": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
837
+ The message must have the following [Tags] generated in strict accordance with the following [Rules]:
838
+ ```
839
+ [Tags]
840
+ - Words to describe full names of characters and names of series in which they appear.
841
+ - Words to describe names of the people there and their numbers, such as 2girls, 1boy.
842
+ - Words to describe their hair color, hairstyle, hair length, hair accessory, eye color, eye shape, facial expression, breast size, and clothing of them in detail.
843
+ - Words to describe their external features, ornaments and belongings (also specify colors, patterns, shapes) in detail.
844
+ - Words to describe their stance from head to toe in detail.
845
+ - Words to describe their acting, especially with sexual activity in detail.
846
+ - Words to describe their surroundings in detail.
847
+ - Words to describe background details.
848
+ [Rules]
849
+ - Any Tags should be plain text in English and don't use line breaks.
850
+ - Message is only composed of Tags in 1 line, separated by commas with spaces between Tags, in lower case English.
851
+ - Message should be enclosed in //GENBEGIN//:// and //://GENEND//.
852
+ - Convert any nicknames to full names first.
853
+ - If a sexual theme is given, priority should be given to specific and rich descriptions of sexual activity, especially about genitals, fluids.
854
+ - Assemble a short story internally which is developed from the themes provided, then describe a scene into detailed English text based on the central character internally.
855
+ - Tags can be in the form of sentences.
856
+ - You can also use Danbooru tags, E621 tags as Tags.
857
+ - Anyway, keep processing until you've finished outputting a message.
858
+ ```
859
+ Based on these Rules, please tell me a message within 40 Tags that can generate an image for the following themes,
860
+ then describe the message you've generated in short, in <LANGUAGE>.:''',
861
+ "Japanese to Danbooru Dictionary": r"""You are a helpful AI assistant.
862
+ Extract Japanese words from the following sentences and output them separated by commas. Convert words in their original forms.
863
+ Output should be enclosed in //GENBEGIN//:// and //://GENEND//. The text to be given is as follows:""",
864
+ "Chat with LLM": r"You are a helpful AI assistant. Respond in <LANGUAGE>."}
865
+
866
+
867
def get_dolphin_sysprompt():
    """Return the active system prompt with <LANGUAGE> replaced by the output language.

    Falls back to "" if the current mode key is missing from the prompt table.
    """
    # The placeholder is a literal with no regex metacharacters, so plain
    # str.replace does the job; the original re.sub (and its per-call
    # `import re`) was unnecessary.
    template = dolphin_system_prompt.get(dolphin_sysprompt_mode, "")
    return template.replace('<LANGUAGE>', dolphin_output_language)
871
+
872
+
873
def get_dolphin_sysprompt_mode():
    """Return the names of all available sysprompt modes."""
    return [*dolphin_system_prompt]
875
+
876
+
877
def select_dolphin_sysprompt(key: str):
    """Switch the active sysprompt mode and refresh the sysprompt textbox.

    Unknown keys fall back to "Default".
    """
    global dolphin_sysprompt_mode
    # `key in dict` instead of the original `not key in dict.keys()` idiom.
    dolphin_sysprompt_mode = key if key in dolphin_system_prompt else "Default"
    return gr.update(value=get_dolphin_sysprompt())
884
+
885
+
886
def get_dolphin_languages():
    """Return the selectable output languages (llm_languages is defined elsewhere in this module)."""
    return llm_languages
888
+
889
+
890
def select_dolphin_language(lang: str):
    """Set the global output language and refresh the sysprompt textbox to match."""
    global dolphin_output_language
    dolphin_output_language = lang
    # Re-render the sysprompt so the new <LANGUAGE> substitution is visible.
    return gr.update(value=get_dolphin_sysprompt())
894
+
895
+
896
@wrapt_timeout_decorator.timeout(dec_timeout=5.0)  # abort if extraction takes more than 5 s
def get_raw_prompt(msg: str):
    """Extract the text between /GENBEGIN/ and /GENEND/ markers and flatten it.

    All matches are joined with ", "; marker junk characters (slashes, colons,
    quotes, newlines, etc.) are replaced by spaces and the result lowercased.
    Returns "" when no marker pair is found.
    """
    import re
    m = re.findall(r'/GENBEGIN/(.+?)/GENEND/', msg, re.DOTALL)
    return re.sub(r'[*/:_"#\n]', ' ', ", ".join(m)).lower() if m else ""
901
+
902
+
903
@spaces.GPU(duration=60)
def dolphin_respond(
    message: str,
    history: list[tuple[str, str]],
    model: str = default_llm_model_filename,
    system_message: str | None = None,
    max_tokens: int = 1024,
    temperature: float = 0.7,
    top_p: float = 0.95,
    top_k: int = 40,
    repeat_penalty: float = 1.1,
    progress=gr.Progress(track_tqdm=True),
):
    """Stream a chat completion for `message` from a local GGUF model.

    Yields the partial response as a one-turn chatbot history
    ([(partial_text, None)]) so Gradio can render it incrementally.

    Fix: the original default `system_message=get_dolphin_sysprompt()` was
    evaluated once at import time, so later sysprompt mode/language changes
    were ignored by defaulted calls; the default is now resolved per call.
    """
    from pathlib import Path
    progress(0, desc="Processing...")

    if system_message is None:
        system_message = get_dolphin_sysprompt()

    # A user-selected chat template overrides the per-model default.
    if override_llm_format:
        chat_template = override_llm_format
    else:
        chat_template = llm_models[model][1]

    llm = Llama(
        model_path=str(Path(f"{llm_models_dir}/{model}")),
        flash_attn=True,
        n_gpu_layers=81,  # offload (nearly) all layers to GPU
        n_batch=1024,
        n_ctx=8192,
    )
    provider = LlamaCppPythonProvider(llm)

    agent = LlamaCppAgent(
        provider,
        system_prompt=f"{system_message}",
        predefined_messages_formatter_type=chat_template,
        debug_output=False
    )

    settings = provider.get_provider_default_settings()
    settings.temperature = temperature
    settings.top_k = top_k
    settings.top_p = top_p
    settings.max_tokens = max_tokens
    settings.repeat_penalty = repeat_penalty
    settings.stream = True

    # Replay the Gradio (user, assistant) history into the agent's format.
    messages = BasicChatHistory()
    for user_text, assistant_text in history:
        messages.add_message({'role': Roles.user, 'content': user_text})
        messages.add_message({'role': Roles.assistant, 'content': assistant_text})

    stream = agent.get_chat_response(
        message,
        llm_sampling_settings=settings,
        chat_history=messages,
        returns_streaming_generator=True,
        print_output=False
    )

    progress(0.5, desc="Processing...")

    outputs = ""
    for output in stream:
        outputs += output
        yield [(outputs, None)]
976
+
977
+
978
def dolphin_parse(
    history: list[tuple[str, str]],
):
    """Pull the generated tag prompt out of the last chat turn.

    Returns (prompt_text, update, update). The prompt is "" in plain-chat
    mode, when history is empty, or when extraction fails.
    Cleanup: dropped the redundant `len(history) < 1` after `not history`
    and the dead `prompts = []` initialization.
    """
    if dolphin_sysprompt_mode == "Chat with LLM" or not history:
        return "", gr.update(visible=True), gr.update(visible=True)
    try:
        raw_prompt = get_raw_prompt(history[-1][0])
    except Exception:
        return "", gr.update(visible=True), gr.update(visible=True)
    if dolphin_sysprompt_mode == "Japanese to Danbooru Dictionary" and is_japanese(raw_prompt):
        prompts = list_uniq(jatags_to_danbooru_tags(to_list_ja(raw_prompt)) + ["nsfw", "explicit"])
    else:
        prompts = list_uniq(to_list(raw_prompt) + ["nsfw", "explicit"])
    return ", ".join(prompts), gr.update(interactive=True), gr.update(interactive=True)
994
+
995
+
996
@spaces.GPU(duration=60)
def dolphin_respond_auto(
    message: str,
    history: list[tuple[str, str]],
    model: str = default_llm_model_filename,
    system_message: str | None = None,
    max_tokens: int = 1024,
    temperature: float = 0.7,
    top_p: float = 0.95,
    top_k: int = 40,
    repeat_penalty: float = 1.1,
    progress=gr.Progress(track_tqdm=True),
):
    """Stream a completion for `message`, used by the auto-translate flow.

    Same contract as dolphin_respond; yields [(partial_text, None)].

    Fix (kept consistent with dolphin_respond): the sysprompt default is now
    resolved per call instead of being frozen at import time.
    """
    from pathlib import Path
    progress(0, desc="Processing...")

    if system_message is None:
        system_message = get_dolphin_sysprompt()

    # A user-selected chat template overrides the per-model default.
    if override_llm_format:
        chat_template = override_llm_format
    else:
        chat_template = llm_models[model][1]

    llm = Llama(
        model_path=str(Path(f"{llm_models_dir}/{model}")),
        flash_attn=True,
        n_gpu_layers=81,  # offload (nearly) all layers to GPU
        n_batch=1024,
        n_ctx=8192,
    )
    provider = LlamaCppPythonProvider(llm)

    agent = LlamaCppAgent(
        provider,
        system_prompt=f"{system_message}",
        predefined_messages_formatter_type=chat_template,
        debug_output=False
    )

    settings = provider.get_provider_default_settings()
    settings.temperature = temperature
    settings.top_k = top_k
    settings.top_p = top_p
    settings.max_tokens = max_tokens
    settings.repeat_penalty = repeat_penalty
    settings.stream = True

    # Replay the Gradio (user, assistant) history into the agent's format.
    messages = BasicChatHistory()
    for user_text, assistant_text in history:
        messages.add_message({'role': Roles.user, 'content': user_text})
        messages.add_message({'role': Roles.assistant, 'content': assistant_text})

    progress(0, desc="Translating...")
    stream = agent.get_chat_response(
        message,
        llm_sampling_settings=settings,
        chat_history=messages,
        returns_streaming_generator=True,
        print_output=False
    )

    progress(0.5, desc="Processing...")

    outputs = ""
    for output in stream:
        outputs += output
        yield [(outputs, None)]
1071
+
1072
+
1073
def dolphin_parse_simple(
    message: str,
    history: list[tuple[str, str]],
):
    """Extract a tag prompt string from the last chat turn (simple variant).

    Returns `message` unchanged in plain-chat mode or when there is no
    history; returns "" if extraction fails.
    Cleanup: dropped the redundant `len(history) < 1` check, the dead
    `prompts = []` initialization, and a commented-out guard line.
    """
    if dolphin_sysprompt_mode == "Chat with LLM" or not history:
        return message
    try:
        raw_prompt = get_raw_prompt(history[-1][0])
    except Exception:
        return ""
    if dolphin_sysprompt_mode == "Japanese to Danbooru Dictionary" and is_japanese(raw_prompt):
        prompts = list_uniq(jatags_to_danbooru_tags(to_list_ja(raw_prompt)) + ["nsfw", "explicit", "rating_explicit"])
    else:
        prompts = list_uniq(to_list(raw_prompt) + ["nsfw", "explicit", "rating_explicit"])
    return ", ".join(prompts)
lora_dict.json ADDED
The diff for this file is too large to render. See raw diff
 
model_dict.json ADDED
The diff for this file is too large to render. See raw diff
 
modutils.py ADDED
@@ -0,0 +1,1240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import spaces
2
+ import json
3
+ import gradio as gr
4
+ from huggingface_hub import HfApi
5
+ import os
6
+ from pathlib import Path
7
+
8
+
9
+ from env import (HF_LORA_PRIVATE_REPOS1, HF_LORA_PRIVATE_REPOS2,
10
+ HF_MODEL_USER_EX, HF_MODEL_USER_LIKES,
11
+ directory_loras, hf_read_token, hf_token, CIVITAI_API_KEY)
12
+
13
+
14
def get_user_agent():
    """Return a fixed, browser-like User-Agent string for outbound HTTP requests."""
    ua = (
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) '
        'Gecko/20100101 Firefox/127.0'
    )
    return ua
16
+
17
+
18
def to_list(s):
    """Split a comma-separated string into stripped, non-empty items.

    Fix: the original filter tested `not s == ""` (the whole input string)
    instead of each element, so blank items from inputs like "a,,b" leaked
    through as empty strings.
    """
    return [x.strip() for x in s.split(",") if x.strip()]
20
+
21
+
22
def list_uniq(l):
    """Deduplicate a list while preserving first-occurrence order."""
    return list(dict.fromkeys(l))
24
+
25
+
26
def list_sub(a, b):
    """Return the elements of `a` that do not occur in `b`, preserving order."""
    kept = []
    for item in a:
        if item not in b:
            kept.append(item)
    return kept
28
+
29
+
30
from translatepy import Translator
translator = Translator()  # module-level singleton; constructing a Translator is relatively costly
def translate_to_en(input: str):
    """Translate arbitrary text to English; on any failure, return the input unchanged."""
    try:
        output = str(translator.translate(input, 'English'))
    except Exception as e:
        # Best-effort: fall back to the untranslated input rather than failing.
        output = input
        print(e)
    return output
39
+
40
+
41
def get_local_model_list(dir_path):
    """Return paths of model files (.ckpt/.pt/.pth/.safetensors/.bin) directly under dir_path."""
    valid_extensions = ('.ckpt', '.pt', '.pth', '.safetensors', '.bin')
    return [
        str(Path(dir_path) / entry.name)
        for entry in Path(dir_path).glob("*")
        if entry.suffix in valid_extensions
    ]
49
+
50
+
51
def download_things(directory, url, hf_token="", civitai_api_key=""):
    """Download a file from Google Drive, Hugging Face, or Civitai into `directory`.

    Dispatches on the URL's host: gdown for Drive, aria2c otherwise.
    NOTE(review): URLs are interpolated into os.system() shell commands without
    quoting — a crafted URL could inject shell commands; consider
    subprocess.run([...], shell=False). Also assumes `gdown` and `aria2c`
    binaries are on PATH.
    """
    url = url.strip()
    if "drive.google.com" in url:
        # gdown resolves Drive share links; run from the target directory.
        original_dir = os.getcwd()
        os.chdir(directory)
        os.system(f"gdown --fuzzy {url}")
        os.chdir(original_dir)
    elif "huggingface.co" in url:
        url = url.replace("?download=true", "")
        # url = urllib.parse.quote(url, safe=':/')  # fix encoding
        if "/blob/" in url:
            # Blob pages are HTML; the /resolve/ form serves the raw file.
            url = url.replace("/blob/", "/resolve/")
        user_header = f'"Authorization: Bearer {hf_token}"'
        if hf_token:
            # Authenticated download (private/gated repos).
            os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
        else:
            os.system (f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
    elif "civitai.com" in url:
        if "?" in url:
            # Strip any existing query string before appending the API token.
            url = url.split("?")[0]
        if civitai_api_key:
            url = url + f"?token={civitai_api_key}"
            os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
        else:
            # Civitai requires an API key; red ANSI warning, no download attempted.
            print("\033[91mYou need an API key to download Civitai models.\033[0m")
    else:
        # Generic URL: plain aria2c download.
        os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
78
+
79
+
80
def escape_lora_basename(basename: str):
    """Sanitize a basename: map '.' and ' ' to '_', drop ','."""
    return basename.translate(str.maketrans({".": "_", " ": "_", ",": ""}))
82
+
83
+
84
def to_lora_key(path: str):
    """Derive a canonical LoRA key from a file path (sanitized stem, no extension)."""
    return escape_lora_basename(Path(path).stem)
86
+
87
+
88
def to_lora_path(key: str):
    """Resolve a LoRA key to a local .safetensors path; existing file paths pass through unchanged."""
    if Path(key).is_file():
        return key
    safe_name = f"{escape_lora_basename(key)}.safetensors"
    return str(Path(f"{directory_loras}/{safe_name}"))
92
+
93
+
94
def safe_float(input):
    """Convert `input` to float, falling back to 1.0 on any conversion failure."""
    try:
        return float(input)
    except Exception:
        return 1.0
101
+
102
+
103
def save_gallery_images(images, progress=gr.Progress(track_tqdm=True)):
    """Rename gallery image files to timestamped names and return gallery/path updates.

    Returns (gallery update, file-path update, visibility update).
    NOTE(review): renamed files land in the current working directory
    (Path(filename) is relative) — confirm that is the intended destination.
    """
    from datetime import datetime, timezone, timedelta
    progress(0, desc="Updating gallery...")
    # UTC+9 (presumably JST) timestamp used as the filename prefix.
    dt_now = datetime.now(timezone(timedelta(hours=9)))
    basename = dt_now.strftime('%Y%m%d_%H%M%S_')
    i = 1  # 1-based sequence number appended to the basename
    if not images: return images
    output_images = []
    output_paths = []
    for image in images:
        filename = basename + str(i) + ".png"
        i += 1
        oldpath = Path(image[0])  # gallery items are (filepath, caption) pairs
        newpath = oldpath
        try:
            if oldpath.exists():
                newpath = oldpath.resolve().rename(Path(filename).resolve())
        except Exception as e:
            print(e)
        finally:
            # Record the path even if the rename failed (falls back to oldpath).
            output_paths.append(str(newpath))
            output_images.append((str(newpath), str(filename)))
    progress(1, desc="Gallery updated.")
    return gr.update(value=output_images), gr.update(value=output_paths), gr.update(visible=True)
127
+
128
+
129
def download_private_repo(repo_id, dir_path, is_replace):
    """Snapshot-download model weights from a private HF repo into dir_path.

    Requires hf_read_token; silently returns without it. When is_replace is
    set, files whose stems contain '.' or ' ' are renamed to sanitized names.
    """
    from huggingface_hub import snapshot_download
    if not hf_read_token: return
    try:
        snapshot_download(repo_id=repo_id, local_dir=dir_path, allow_patterns=['*.ckpt', '*.pt', '*.pth', '*.safetensors', '*.bin'], use_auth_token=hf_read_token)
    except Exception as e:
        print(f"Error: Failed to download {repo_id}.")
        print(e)
        return
    if is_replace:
        for file in Path(dir_path).glob("*"):
            # Fix: the original condition mixed `and`/`or` without parentheses
            # (`A and B or C and D`), so a file with '.' in its stem was renamed
            # regardless of suffix, and the exists() guard only covered one arm.
            if file.exists() and ("." in file.stem or " " in file.stem) and file.suffix in ['.ckpt', '.pt', '.pth', '.safetensors', '.bin']:
                newpath = Path(f'{file.parent.name}/{escape_lora_basename(file.stem)}{file.suffix}')
                file.resolve().rename(newpath.resolve())
143
+
144
+
145
# Maps a local model file path to the Hugging Face repo_id it came from,
# populated by get_private_model_list(); consumed by download_private_file_from_somewhere().
private_model_path_repo_dict = {} # {"local filepath": "huggingface repo_id", ...}
146
+
147
+
148
def get_private_model_list(repo_id, dir_path):
    """List model files of a private HF repo as local-style paths, recording their origin repo.

    Returns [] without a read token or on API failure. Side effect: registers
    each path in private_model_path_repo_dict for later download.
    """
    global private_model_path_repo_dict
    if not hf_read_token:
        return []
    try:
        repo_files = HfApi().list_repo_files(repo_id, token=hf_read_token)
    except Exception as e:
        print(f"Error: Failed to list {repo_id}.")
        print(e)
        return []
    valid_suffixes = ('.ckpt', '.pt', '.pth', '.safetensors', '.bin')
    model_list = []
    for repo_file in repo_files:
        local_path = Path(f"{dir_path}/{repo_file}")
        if local_path.suffix in valid_suffixes:
            model_list.append(str(local_path))
    private_model_path_repo_dict.update({m: repo_id for m in model_list})
    return model_list
166
+
167
+
168
def download_private_file(repo_id, path, is_replace):
    """Download one file from a private HF repo into its parent directory.

    Skips the download when no read token is configured or the (possibly
    sanitized) target already exists. When is_replace is set, the downloaded
    file is renamed to its sanitized basename.
    """
    from huggingface_hub import hf_hub_download
    file = Path(path)
    newpath = Path(f'{file.parent.name}/{escape_lora_basename(file.stem)}{file.suffix}') if is_replace else file
    if not hf_read_token or newpath.exists(): return
    filename = file.name
    dirname = file.parent.name
    try:
        hf_hub_download(repo_id=repo_id, filename=filename, local_dir=dirname, use_auth_token=hf_read_token)
    except Exception as e:
        # Fix: the original message was a placeholder-less f-string
        # ("Failed to download (unknown).") — now names the actual file/repo.
        print(f"Error: Failed to download {filename} from {repo_id}.")
        print(e)
        return
    if is_replace:
        file.resolve().rename(newpath.resolve())
183
+
184
+
185
def download_private_file_from_somewhere(path, is_replace):
    """Download `path` from whichever private repo it was indexed under, if any."""
    # `path not in dict` instead of `not path in dict.keys()`; direct indexing
    # replaces the redundant .get() after the membership check.
    if path not in private_model_path_repo_dict: return
    repo_id = private_model_path_repo_dict[path]
    download_private_file(repo_id, path, is_replace)
189
+
190
+
191
model_id_list = []
def get_model_id_list():
    """Return a cached list of public text-to-image model ids from the HF Hub.

    Order: liked authors' models (by likes), then the extra authors' anime
    models, then their realistic models. FLUX, private, and gated repos are
    excluded from the extra-author set.
    """
    global model_id_list
    if model_id_list: return model_id_list
    api = HfApi()
    model_ids = []
    try:
        models_likes = []
        for author in HF_MODEL_USER_LIKES:
            models_likes.extend(api.list_models(author=author, task="text-to-image", cardData=True, sort="likes"))
        models_ex = []
        for author in HF_MODEL_USER_EX:
            # Fix: accumulate every author's models — the original assigned
            # (`models_ex = ...`) inside the loop, keeping only the last author.
            models_ex.extend(api.list_models(author=author, task="text-to-image", cardData=True, sort="last_modified"))
    except Exception as e:
        # Fix: the old message referenced `author`, which is unbound when an
        # author list is empty.
        print("Error: Failed to list models.")
        print(e)
        return model_ids
    for model in models_likes:
        if not model.private:
            model_ids.append(model.id)
    anime_models = []
    real_models = []
    for model in models_ex:
        if not model.private and not model.gated and "diffusers:FluxPipeline" not in model.tags:
            # Models tagged "anime" are listed ahead of realistic ones.
            (anime_models if "anime" in model.tags else real_models).append(model.id)
    model_ids.extend(anime_models)
    model_ids.extend(real_models)
    model_id_list = model_ids.copy()
    return model_ids
219
+
220
+
221
+ model_id_list = get_model_id_list()
222
+
223
+
224
def get_t2i_model_info(repo_id: str):
    """Build a one-line Markdown summary for a public diffusers model repo.

    Includes pipeline type (FLUX.1/SDXL/SD1.5), distinguishing card tags,
    download/like counts, last-modified date and a repo link. Returns "" for
    invalid, private, gated or non-diffusers repos, otherwise a gr.update
    carrying the Markdown string.
    """
    api = HfApi()
    try:
        # Queries containing spaces can't be repo ids; bail out early.
        if " " in repo_id or not api.repo_exists(repo_id): return ""
        model = api.model_info(repo_id=repo_id)
    except Exception as e:
        print(f"Error: Failed to get {repo_id}'s info.")
        print(e)
        return ""
    if model.private or model.gated: return ""
    tags = model.tags
    info = []
    url = f"https://huggingface.co/{repo_id}/"
    if not 'diffusers' in tags: return ""
    if 'diffusers:FluxPipeline' in tags: info.append("FLUX.1")
    elif 'diffusers:StableDiffusionXLPipeline' in tags: info.append("SDXL")
    elif 'diffusers:StableDiffusionPipeline' in tags: info.append("SD1.5")
    if model.card_data and model.card_data.tags:
        # Strip boilerplate tags so only distinguishing ones remain.
        info.extend(list_sub(model.card_data.tags, ['text-to-image', 'stable-diffusion', 'stable-diffusion-api', 'safetensors', 'stable-diffusion-xl']))
    info.append(f"DLs: {model.downloads}")
    info.append(f"likes: {model.likes}")
    info.append(model.last_modified.strftime("lastmod: %Y-%m-%d"))
    md = f"Model Info: {', '.join(info)}, [Model Repo]({url})"
    return gr.update(value=md)
248
+
249
+
250
def get_tupled_model_list(model_list):
    """Return (display-name, repo_id) tuples for the public diffusers repos
    in *model_list*, skipping private/gated/non-diffusers entries.

    Improvement: the HfApi client is now created once instead of once per
    repo; per-repo failures are still printed and skipped exactly as before.
    """
    if not model_list: return []
    tupled_list = []
    api = HfApi()  # hoisted out of the loop: one client for all lookups
    for repo_id in model_list:
        try:
            if not api.repo_exists(repo_id): continue
            model = api.model_info(repo_id=repo_id)
        except Exception as e:
            print(e)
            continue
        if model.private or model.gated: continue
        tags = model.tags
        info = []
        if not 'diffusers' in tags: continue
        # NOTE: FLUX is a plain `if` (not elif), so a repo tagged both FLUX
        # and SDXL lists both labels — preserved from the original.
        if 'diffusers:FluxPipeline' in tags:
            info.append("FLUX.1")
        if 'diffusers:StableDiffusionXLPipeline' in tags:
            info.append("SDXL")
        elif 'diffusers:StableDiffusionPipeline' in tags:
            info.append("SD1.5")
        if model.card_data and model.card_data.tags:
            info.extend(list_sub(model.card_data.tags, ['text-to-image', 'stable-diffusion', 'stable-diffusion-api', 'safetensors', 'stable-diffusion-xl']))
        if "pony" in info:
            info.remove("pony")
            name = f"{repo_id} (Pony🐴, {', '.join(info)})"
        else:
            name = f"{repo_id} ({', '.join(info)})"
        tupled_list.append((name, repo_id))
    return tupled_list
280
+
281
+
282
# ---- LoRA metadata bootstrap ----
# Seed the private metadata table from lora_dict.json (best effort: a
# missing or corrupt file only prints the error and leaves the dict empty).
private_lora_dict = {}
try:
    with open('lora_dict.json', encoding='utf-8') as f:
        d = json.load(f)
        for k, v in d.items():
            private_lora_dict[escape_lora_basename(k)] = v
except Exception as e:
    print(e)
# Working metadata table keyed by escaped LoRA basename; "" and "None" are
# sentinel entries used by the UI dropdowns. Values are 5-element lists:
# [trigger words, base model, version name, model URL, sample image URL].
loras_dict = {"None": ["", "", "", "", ""], "": ["", "", "", "", ""]} | private_lora_dict.copy()
civitai_not_exists_list = []  # local paths already known to be absent on Civitai
loras_url_to_path_dict = {} # {"URL to download": "local filepath", ...}
civitai_lora_last_results = {}  # {"URL to download": {search results}, ...}
all_lora_list = []  # cache filled by get_all_lora_list()
295
+
296
+
297
private_lora_model_list = []
def get_private_lora_model_lists():
    """Return (and cache at module level) LoRA paths from the private repos.

    Paths from HF_LORA_PRIVATE_REPOS1 keep repo order; paths from
    HF_LORA_PRIVATE_REPOS2 are sorted before being appended, then the
    combined list is de-duplicated.
    """
    global private_lora_model_list
    if private_lora_model_list:
        return private_lora_model_list
    primary = []
    for repo in HF_LORA_PRIVATE_REPOS1:
        primary += get_private_model_list(repo, directory_loras)
    secondary = []
    for repo in HF_LORA_PRIVATE_REPOS2:
        secondary += get_private_model_list(repo, directory_loras)
    models = list_uniq(primary + sorted(secondary))
    private_lora_model_list = models.copy()
    return models


private_lora_model_list = get_private_lora_model_lists()
313
+
314
+
315
def get_civitai_info(path):
    """Look up Civitai metadata for a local model file by its SHA-256 hash.

    NOTE(review): this definition is shadowed by a second, later
    `get_civitai_info` in this same module; at runtime the later one wins.
    Returns a 5-element list [trigger words, base model, version name,
    model URL, sample image URL]; ["", "", "", "", ""] for known-missing or
    request failures, None when the file doesn't exist or the HTTP status
    is not OK.
    """
    global civitai_not_exists_list
    import requests
    from urllib3.util import Retry
    from requests.adapters import HTTPAdapter
    if path in set(civitai_not_exists_list): return ["", "", "", "", ""]
    if not Path(path).exists(): return None
    user_agent = get_user_agent()
    headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
    base_url = 'https://civitai.com/api/v1/model-versions/by-hash/'
    params = {}
    session = requests.Session()
    retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
    session.mount("https://", HTTPAdapter(max_retries=retries))
    import hashlib
    # Hash the whole file in one read; large checkpoints are loaded into
    # memory here — acceptable for this app, but worth noting.
    with open(path, 'rb') as file:
        file_data = file.read()
        hash_sha256 = hashlib.sha256(file_data).hexdigest()
    url = base_url + hash_sha256
    try:
        r = session.get(url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
    except Exception as e:
        print(e)
        return ["", "", "", "", ""]
    if not r.ok: return None
    json = r.json()  # shadows any module named `json` within this function
    if not 'baseModel' in json:
        # Remember misses so we don't re-hash/re-query this file again.
        civitai_not_exists_list.append(path)
        return ["", "", "", "", ""]
    items = []
    items.append(" / ".join(json['trainedWords']))
    items.append(json['baseModel'])
    items.append(json['model']['name'])
    items.append(f"https://civitai.com/models/{json['modelId']}")
    items.append(json['images'][0]['url'])
    return items
351
+
352
+
353
def get_lora_model_list():
    """Return every known LoRA path, prefixed by the "" / "None" sentinels."""
    found = list_uniq(get_private_lora_model_lists() + get_local_model_list(directory_loras))
    return ["", "None"] + found
358
+
359
+
360
def get_all_lora_list():
    """Refresh the module-level `all_lora_list` cache and return the list."""
    global all_lora_list
    loras = get_lora_model_list()
    all_lora_list = list(loras)  # cache holds its own copy
    return loras
365
+
366
+
367
def get_all_lora_tupled_list():
    """Return (display-name, path) tuples for every known LoRA.

    Display names embed the base model and version from loras_dict;
    metadata missing from the dict is fetched from Civitai and cached.
    """
    global loras_dict
    models = get_all_lora_list()
    if not models: return []
    tupled_list = []
    for model in models:
        #if not model: continue # to avoid GUI-related bug
        basename = Path(model).stem
        key = to_lora_key(model)
        items = None
        if key in loras_dict.keys():
            items = loras_dict.get(key, None)
        else:
            # Not cached yet: query Civitai and remember the result.
            items = get_civitai_info(model)
            if items != None:
                loras_dict[key] = items
        name = basename
        value = model
        # items[1] = base model, items[2] = version name (empty when unknown).
        if items and items[2] != "":
            if items[1] == "Pony":
                name = f"{basename} (for {items[1]}🐴, {items[2]})"
            else:
                name = f"{basename} (for {items[1]}, {items[2]})"
        tupled_list.append((name, value))
    return tupled_list
392
+
393
+
394
def update_lora_dict(path):
    """Register Civitai metadata for *path* in loras_dict, if not present."""
    global loras_dict
    key = escape_lora_basename(Path(path).stem)
    if key in loras_dict:
        return
    info = get_civitai_info(path)
    if info is not None:
        loras_dict[key] = info
401
+
402
+
403
def download_lora(dl_urls: str):
    """Download one or more LoRA files from comma-separated URLs.

    Files are renamed to escaped basenames, registered in loras_dict, and
    mapped in loras_url_to_path_dict. Returns the local path of the last
    file downloaded, or "" when nothing new arrived.

    Bug fix: `urls[i]` could raise IndexError (or mismap URLs to files)
    when some URLs were skipped because their files already existed —
    `urls` and the post-scan `new_files` lists are not guaranteed to have
    matching lengths. The URL mapping is now guarded by an index check.
    """
    global loras_url_to_path_dict
    dl_path = ""
    before = get_local_model_list(directory_loras)
    urls = []
    for url in [u.strip() for u in dl_urls.split(',')]:
        local_path = f"{directory_loras}/{url.split('/')[-1]}"
        if not Path(local_path).exists():
            download_things(directory_loras, url, hf_token, CIVITAI_API_KEY)
            urls.append(url)
    after = get_local_model_list(directory_loras)
    new_files = list_sub(after, before)
    i = 0
    for file in new_files:
        path = Path(file)
        if path.exists():
            new_path = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
            path.resolve().rename(new_path.resolve())
            if i < len(urls):  # guard: URL count may differ from new-file count
                loras_url_to_path_dict[urls[i]] = str(new_path)
            update_lora_dict(str(new_path))
            dl_path = str(new_path)
            i += 1
    return dl_path
426
+
427
+
428
def copy_lora(path: str, new_path: str):
    """Copy a LoRA file to *new_path* and register it.

    Returns *new_path* on success (or when source and destination are the
    same), None when the source is missing or the copy fails.
    """
    import shutil
    if path == new_path:
        return new_path
    src = Path(path)
    dst = Path(new_path)
    if not src.exists():
        return None
    try:
        shutil.copy(str(src.resolve()), str(dst.resolve()))
    except Exception as e:
        print(e)
        return None
    update_lora_dict(str(dst))
    return new_path
443
+
444
+
445
def download_my_lora(dl_urls: str, lora1: str, lora2: str, lora3: str, lora4: str, lora5: str):
    """Download LoRAs from *dl_urls* and place the last download into the
    first free LoRA slot, then refresh all five dropdowns' choices.

    A slot is "free" when it is empty or "None"; occupied slots are left
    untouched. Returns five gr.update objects, one per dropdown.
    """
    path = download_lora(dl_urls)
    if path:
        if not lora1 or lora1 == "None":
            lora1 = path
        elif not lora2 or lora2 == "None":
            lora2 = path
        elif not lora3 or lora3 == "None":
            lora3 = path
        elif not lora4 or lora4 == "None":
            lora4 = path
        elif not lora5 or lora5 == "None":
            lora5 = path
    choices = get_all_lora_tupled_list()
    return gr.update(value=lora1, choices=choices), gr.update(value=lora2, choices=choices), gr.update(value=lora3, choices=choices),\
        gr.update(value=lora4, choices=choices), gr.update(value=lora5, choices=choices)
461
+
462
+
463
def get_valid_lora_name(query: str):
    """Resolve a user-supplied LoRA reference (name, path or URL) to a
    usable local path, downloading or fuzzy-matching when necessary.

    Resolution order: registered key → known URL mapping → local file →
    direct download (http queries) → fuzzy search. Returns "None" when
    nothing can be resolved.
    """
    path = "None"
    if not query or query == "None": return "None"
    if to_lora_key(query) in loras_dict.keys(): return query
    if query in loras_url_to_path_dict.keys():
        path = loras_url_to_path_dict[query]
    else:
        # Derive a local path from the last URL/path segment.
        path = to_lora_path(query.strip().split('/')[-1])
    if Path(path).exists():
        return path
    elif "http" in query:
        dl_file = download_lora(query)
        if dl_file and Path(dl_file).exists(): return dl_file
    else:
        # Last resort: fuzzy-match against known LoRAs / Civitai search.
        dl_file = find_similar_lora(query)
        if dl_file and Path(dl_file).exists(): return dl_file
    return "None"
480
+
481
+
482
def get_valid_lora_path(query: str):
    """Resolve *query* to an existing local LoRA path, or None.

    Bug fix: the original tested `Path(path).exists()` while `path` was
    still None, which raised TypeError for any query that was not a
    registered loras_dict key. The intended check is whether the queried
    path itself exists on disk.
    """
    if not query or query == "None": return None
    if to_lora_key(query) in loras_dict.keys(): return query
    if Path(query).exists():
        return query
    return None
490
+
491
+
492
def get_valid_lora_wt(prompt: str, lora_path: str, lora_wt: float):
    """Extract the weight for *lora_path* from a `<lora:key:weight>` tag in
    *prompt*; fall back to *lora_wt* when no tag for this LoRA is present.

    Bug fix: with a single capture group, re.findall returns plain strings,
    so the original `result[0][0]` read only the FIRST CHARACTER of the
    weight (e.g. "0" out of "0.75"); the whole match is now parsed.
    """
    import re
    result = re.findall(f'<lora:{to_lora_key(lora_path)}:(.+?)>', prompt)
    if not result: return lora_wt
    return safe_float(result[0])
499
+
500
+
501
def set_prompt_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
    """Parse `<lora:key:wt>` tags out of *prompt* (Classic syntax only) and
    assign each referenced LoRA to the first free slot of the five.

    Returns the possibly-updated (lora, weight) pairs for all five slots;
    inputs are returned unchanged for non-Classic syntax or tag-free prompts.
    """
    import re
    if not "Classic" in str(prompt_syntax): return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
    # Normalize each slot to a resolvable path (or "None") first.
    lora1 = get_valid_lora_name(lora1)
    lora2 = get_valid_lora_name(lora2)
    lora3 = get_valid_lora_name(lora3)
    lora4 = get_valid_lora_name(lora4)
    lora5 = get_valid_lora_name(lora5)
    if not "<lora" in prompt: return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
    # Pull per-slot weights from the prompt where tags exist.
    lora1_wt = get_valid_lora_wt(prompt, lora1, lora1_wt)
    lora2_wt = get_valid_lora_wt(prompt, lora2, lora2_wt)
    lora3_wt = get_valid_lora_wt(prompt, lora3, lora3_wt)
    lora4_wt = get_valid_lora_wt(prompt, lora4, lora4_wt)
    lora5_wt = get_valid_lora_wt(prompt, lora5, lora5_wt)
    # onN flags mark occupied slots (first tuple element of get_lora_info).
    on1, label1, tag1, md1 = get_lora_info(lora1)
    on2, label2, tag2, md2 = get_lora_info(lora2)
    on3, label3, tag3, md3 = get_lora_info(lora3)
    on4, label4, tag4, md4 = get_lora_info(lora4)
    on5, label5, tag5, md5 = get_lora_info(lora5)
    lora_paths = [lora1, lora2, lora3, lora4, lora5]
    prompts = prompt.split(",") if prompt else []
    for p in prompts:
        p = str(p).strip()
        if "<lora" in p:
            result = re.findall(r'<lora:(.+?):(.+?)>', p)
            if not result: continue
            key = result[0][0]
            wt = result[0][1]
            path = to_lora_path(key)
            if not key in loras_dict.keys() or not path:
                path = get_valid_lora_name(path)
                if not path or path == "None": continue
            if path in lora_paths:
                continue  # already assigned to a slot
            elif not on1:
                lora1 = path
                lora_paths = [lora1, lora2, lora3, lora4, lora5]
                lora1_wt = safe_float(wt)
                on1 = True
            elif not on2:
                lora2 = path
                lora_paths = [lora1, lora2, lora3, lora4, lora5]
                lora2_wt = safe_float(wt)
                on2 = True
            elif not on3:
                lora3 = path
                lora_paths = [lora1, lora2, lora3, lora4, lora5]
                lora3_wt = safe_float(wt)
                on3 = True
            elif not on4:
                lora4 = path
                lora_paths = [lora1, lora2, lora3, lora4, lora5]
                lora4_wt = safe_float(wt)
                # NOTE(review): unlike the other slots this re-queries
                # get_lora_info instead of setting on4 = True — looks like
                # an inconsistency; confirm whether this is intentional.
                on4, label4, tag4, md4 = get_lora_info(lora4)
            elif not on5:
                lora5 = path
                lora_paths = [lora1, lora2, lora3, lora4, lora5]
                lora5_wt = safe_float(wt)
                on5 = True
    return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
561
+
562
+
563
def get_lora_info(lora_path: str):
    """Return (is_valid, label, trigger_tags, markdown) for a LoRA path.

    is_valid: True only when Civitai metadata with a version name exists.
    label: display name, annotated for Pony-base LoRAs.
    trigger_tags: the model's trigger words (slash-joined string).
    markdown: thumbnail + model link when available, else "None".

    Bug fix: the "not registered" branch returned `tag` (an empty string)
    in the is_valid slot, making the tuple's first element sometimes a str
    and sometimes a bool; it now consistently returns the boolean.
    """
    is_valid = False
    tag = ""
    label = ""
    md = "None"
    if not lora_path or lora_path == "None":
        print("LoRA file not found.")
        return is_valid, label, tag, md
    path = Path(lora_path)
    new_path = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
    if not to_lora_key(str(new_path)) in loras_dict.keys() and str(path) not in set(get_all_lora_list()):
        print("LoRA file is not registered.")
        return is_valid, label, tag, md  # fixed: first slot was `tag`
    if not new_path.exists():
        # Fetch from a private repo if the escaped file isn't local yet.
        download_private_file_from_somewhere(str(path), True)
    basename = new_path.stem
    label = f'Name: {basename}'
    items = loras_dict.get(basename, None)
    if items == None:
        items = get_civitai_info(str(new_path))
        if items != None:
            loras_dict[basename] = items
    # items[1] = base model, items[2] = version name, items[3] = model URL,
    # items[4] = sample image URL.
    if items and items[2] != "":
        tag = items[0]
        label = f'Name: {basename}'
        if items[1] == "Pony":
            label = f'Name: {basename} (for Pony🐴)'
        if items[4]:
            md = f'<img src="{items[4]}" alt="thumbnail" width="150" height="240"><br>[LoRA Model URL]({items[3]})'
        elif items[3]:
            md = f'[LoRA Model URL]({items[3]})'
        is_valid = True
    return is_valid, label, tag, md
596
+
597
+
598
def normalize_prompt_list(tags: list[str]):
    """Strip whitespace from each tag and drop empties, preserving order."""
    return [cleaned for cleaned in (str(t).strip() for t in tags) if cleaned]
605
+
606
+
607
def apply_lora_prompt(prompt: str = "", lora_info: str = ""):
    """Merge a LoRA's trigger words into *prompt*, de-duplicated.

    lora_info is a slash-separated trigger-word string (loras_dict item 0);
    "None" leaves the prompt untouched. Returns a gr.update with the new
    comma-joined prompt (trailing ", " kept for easy appending).
    """
    if lora_info == "None": return gr.update(value=prompt)
    tags = prompt.split(",") if prompt else []
    prompts = normalize_prompt_list(tags)

    # Trigger words are "/"-separated; convert to comma tags first.
    lora_tag = lora_info.replace("/",",")
    lora_tags = lora_tag.split(",") if str(lora_info) != "None" else []
    lora_prompts = normalize_prompt_list(lora_tags)

    # Trailing empty element yields a trailing ", " in the joined prompt.
    empty = [""]
    prompt = ", ".join(list_uniq(prompts + lora_prompts) + empty)
    return gr.update(value=prompt)
619
+
620
+
621
def update_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
    """Synchronize the prompt text with the five LoRA slot widgets.

    For Classic syntax, rewrites `<lora:key:wt>` tags in the prompt so they
    reflect the active slots (normalized to two decimals), appending tags
    for slots enabled in the UI. Returns 26 gr.update objects: the prompt,
    then for each slot its dropdown, weight, trigger-tag box, a visibility
    toggle, and the info markdown.
    """
    import re
    # onN flags mark slots with valid registered LoRAs.
    on1, label1, tag1, md1 = get_lora_info(lora1)
    on2, label2, tag2, md2 = get_lora_info(lora2)
    on3, label3, tag3, md3 = get_lora_info(lora3)
    on4, label4, tag4, md4 = get_lora_info(lora4)
    on5, label5, tag5, md5 = get_lora_info(lora5)
    lora_paths = [lora1, lora2, lora3, lora4, lora5]

    output_prompt = prompt
    if "Classic" in str(prompt_syntax):
        prompts = prompt.split(",") if prompt else []
        output_prompts = []
        for p in prompts:
            p = str(p).strip()
            if "<lora" in p:
                result = re.findall(r'<lora:(.+?):(.+?)>', p)
                if not result: continue
                key = result[0][0]
                wt = result[0][1]
                path = to_lora_path(key)
                if not key in loras_dict.keys() or not path: continue
                if path in lora_paths:
                    # Keep the tag but normalize its weight formatting.
                    output_prompts.append(f"<lora:{to_lora_key(path)}:{safe_float(wt):.2f}>")
            elif p:
                output_prompts.append(p)
        lora_prompts = []
        if on1: lora_prompts.append(f"<lora:{to_lora_key(lora1)}:{lora1_wt:.2f}>")
        if on2: lora_prompts.append(f"<lora:{to_lora_key(lora2)}:{lora2_wt:.2f}>")
        if on3: lora_prompts.append(f"<lora:{to_lora_key(lora3)}:{lora3_wt:.2f}>")
        if on4: lora_prompts.append(f"<lora:{to_lora_key(lora4)}:{lora4_wt:.2f}>")
        if on5: lora_prompts.append(f"<lora:{to_lora_key(lora5)}:{lora5_wt:.2f}>")
        output_prompt = ", ".join(list_uniq(output_prompts + lora_prompts + [""]))
    choices = get_all_lora_tupled_list()

    return gr.update(value=output_prompt), gr.update(value=lora1, choices=choices), gr.update(value=lora1_wt),\
        gr.update(value=tag1, label=label1, visible=on1), gr.update(visible=on1), gr.update(value=md1, visible=on1),\
        gr.update(value=lora2, choices=choices), gr.update(value=lora2_wt),\
        gr.update(value=tag2, label=label2, visible=on2), gr.update(visible=on2), gr.update(value=md2, visible=on2),\
        gr.update(value=lora3, choices=choices), gr.update(value=lora3_wt),\
        gr.update(value=tag3, label=label3, visible=on3), gr.update(visible=on3), gr.update(value=md3, visible=on3),\
        gr.update(value=lora4, choices=choices), gr.update(value=lora4_wt),\
        gr.update(value=tag4, label=label4, visible=on4), gr.update(visible=on4), gr.update(value=md4, visible=on4),\
        gr.update(value=lora5, choices=choices), gr.update(value=lora5_wt),\
        gr.update(value=tag5, label=label5, visible=on5), gr.update(visible=on5), gr.update(value=md5, visible=on5)
666
+
667
+
668
def get_my_lora(link_url):
    """Download LoRAs from comma-separated URLs into the loras directory,
    escape their filenames, register them, and refresh the five dropdowns.

    Returns five gr.update objects; the first also selects the newest
    entry in the refreshed model list.
    """
    from pathlib import Path
    before = get_local_model_list(directory_loras)
    for url in [url.strip() for url in link_url.split(',')]:
        # Skip URLs whose target file is already present locally.
        if not Path(f"{directory_loras}/{url.split('/')[-1]}").exists():
            download_things(directory_loras, url, hf_token, CIVITAI_API_KEY)
    after = get_local_model_list(directory_loras)
    new_files = list_sub(after, before)
    for file in new_files:
        path = Path(file)
        if path.exists():
            new_path = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
            path.resolve().rename(new_path.resolve())
            update_lora_dict(str(new_path))
    new_lora_model_list = get_lora_model_list()
    new_lora_tupled_list = get_all_lora_tupled_list()

    return gr.update(
        choices=new_lora_tupled_list, value=new_lora_model_list[-1]
    ), gr.update(
        choices=new_lora_tupled_list
    ), gr.update(
        choices=new_lora_tupled_list
    ), gr.update(
        choices=new_lora_tupled_list
    ), gr.update(
        choices=new_lora_tupled_list
    )
696
+
697
+
698
def upload_file_lora(files, progress=gr.Progress(track_tqdm=True)):
    """Collect temp-file paths from a Gradio upload and reveal follow-ups."""
    progress(0, desc="Uploading...")
    uploaded_paths = [f.name for f in files]
    progress(1, desc="Uploaded.")
    return gr.update(value=uploaded_paths, visible=True), gr.update(visible=True)
703
+
704
+
705
def move_file_lora(filepaths):
    """Move uploaded files into the loras directory, escape their names,
    register them, and refresh the five LoRA dropdowns.

    Returns five gr.update objects; the first also selects the newest
    entry in the refreshed model list.
    """
    import shutil
    for file in filepaths:
        # Move into the loras dir, then rename to the escaped basename.
        path = Path(shutil.move(Path(file).resolve(), Path(f"./{directory_loras}").resolve()))
        newpath = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
        path.resolve().rename(newpath.resolve())
        update_lora_dict(str(newpath))

    new_lora_model_list = get_lora_model_list()
    new_lora_tupled_list = get_all_lora_tupled_list()

    return gr.update(
        choices=new_lora_tupled_list, value=new_lora_model_list[-1]
    ), gr.update(
        choices=new_lora_tupled_list
    ), gr.update(
        choices=new_lora_tupled_list
    ), gr.update(
        choices=new_lora_tupled_list
    ), gr.update(
        choices=new_lora_tupled_list
    )
727
+
728
+
729
def get_civitai_info(path):
    """Look up Civitai metadata for a local model file by its SHA-256 hash.

    NOTE(review): this re-definition shadows the earlier get_civitai_info
    in this module; only this version is live at runtime. It additionally
    records the version's download URL.
    Returns [trigger words, base model, version name, model URL, sample
    image URL]; a default of five empty strings for known-missing files or
    request failures, None when the file doesn't exist or the HTTP status
    is not OK.
    """
    global civitai_not_exists_list
    global loras_url_to_path_dict
    import requests
    from requests.adapters import HTTPAdapter
    from urllib3.util import Retry
    default = ["", "", "", "", ""]
    if path in set(civitai_not_exists_list): return default
    if not Path(path).exists(): return None
    user_agent = get_user_agent()
    headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
    base_url = 'https://civitai.com/api/v1/model-versions/by-hash/'
    params = {}
    session = requests.Session()
    retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
    session.mount("https://", HTTPAdapter(max_retries=retries))
    import hashlib
    # Whole file is read into memory for hashing; large checkpoints included.
    with open(path, 'rb') as file:
        file_data = file.read()
        hash_sha256 = hashlib.sha256(file_data).hexdigest()
    url = base_url + hash_sha256
    try:
        r = session.get(url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
    except Exception as e:
        print(e)
        return default
    else:
        if not r.ok: return None
        json = r.json()  # local name shadows any `json` module in scope
        if 'baseModel' not in json:
            # Remember misses so we don't re-hash/re-query this file again.
            civitai_not_exists_list.append(path)
            return default
        items = []
        items.append(" / ".join(json['trainedWords'])) # The words (prompts) used to trigger the model
        items.append(json['baseModel']) # Base model (SDXL1.0, Pony, ...)
        items.append(json['model']['name']) # The name of the model version
        items.append(f"https://civitai.com/models/{json['modelId']}") # The repo url for the model
        items.append(json['images'][0]['url']) # The url for a sample image
        # NOTE(review): this maps local path -> download URL, which inverts
        # the documented {"URL": "local path"} direction of
        # loras_url_to_path_dict — confirm whether that is intended.
        loras_url_to_path_dict[path] = json['downloadUrl'] # The download url to get the model file for this specific version
        return items
769
+
770
+
771
def search_lora_on_civitai(query: str, allow_model: list[str] = ["Pony", "SDXL 1.0"], limit: int = 100):
    """Search Civitai for LoRA models matching *query*.

    Only versions whose base model is in *allow_model* are kept. Returns a
    list of dicts (name, creator, tags, model_name, base_model, dl_url, md)
    or None on empty query, request failure, or no results.
    """
    import requests
    from requests.adapters import HTTPAdapter
    from urllib3.util import Retry
    if not query: return None
    user_agent = get_user_agent()
    headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
    base_url = 'https://civitai.com/api/v1/models'
    # NOTE(review): 'supportsGeneration ' has a trailing space in the key —
    # looks like a typo that the API would silently ignore; confirm.
    params = {'query': query, 'types': ['LORA'], 'sort': 'Highest Rated', 'period': 'AllTime',
              'nsfw': 'true', 'supportsGeneration ': 'true', 'limit': limit}
    session = requests.Session()
    retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
    session.mount("https://", HTTPAdapter(max_retries=retries))
    try:
        r = session.get(base_url, params=params, headers=headers, stream=True, timeout=(3.0, 30))
    except Exception as e:
        print(e)
        return None
    else:
        if not r.ok: return None
        json = r.json()  # local name shadows any `json` module in scope
        if 'items' not in json: return None
        items = []
        for j in json['items']:
            for model in j['modelVersions']:
                item = {}
                if model['baseModel'] not in set(allow_model): continue
                item['name'] = j['name']
                item['creator'] = j['creator']['username']
                item['tags'] = j['tags']
                item['model_name'] = model['name']
                item['base_model'] = model['baseModel']
                item['dl_url'] = model['downloadUrl']
                item['md'] = f'<img src="{model["images"][0]["url"]}" alt="thumbnail" width="150" height="240"><br>[LoRA Model URL](https://civitai.com/models/{j["id"]})'
                items.append(item)
        return items
807
+
808
+
809
def search_civitai_lora(query, base_model):
    """Run a Civitai LoRA search and populate the result dropdown.

    Caches results keyed by download URL in civitai_lora_last_results.
    Returns four gr.update objects: the result dropdown, the preview
    markdown, and two visibility toggles for the follow-up controls.
    """
    global civitai_lora_last_results
    items = search_lora_on_civitai(query, base_model)
    # Empty search: hide the dropdown/preview but keep the controls shown.
    if not items: return gr.update(choices=[("", "")], value="", visible=False),\
        gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True)
    civitai_lora_last_results = {}
    choices = []
    for item in items:
        base_model_name = "Pony🐴" if item['base_model'] == "Pony" else item['base_model']
        name = f"{item['name']} (for {base_model_name} / By: {item['creator']} / Tags: {', '.join(item['tags'])})"
        value = item['dl_url']
        choices.append((name, value))
        civitai_lora_last_results[value] = item
    if not choices: return gr.update(choices=[("", "")], value="", visible=False),\
        gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True)
    # Preselect the first hit and show its preview card.
    result = civitai_lora_last_results.get(choices[0][1], "None")
    md = result['md'] if result else ""
    return gr.update(choices=choices, value=choices[0][1], visible=True), gr.update(value=md, visible=True),\
        gr.update(visible=True), gr.update(visible=True)
828
+
829
+
830
def select_civitai_lora(search_result):
    """Reflect a chosen Civitai search result into the URL box and preview."""
    if "http" not in search_result:
        return gr.update(value=""), gr.update(value="None", visible=True)
    cached = civitai_lora_last_results.get(search_result, "None")
    md = cached['md'] if cached else ""
    return gr.update(value=search_result), gr.update(value=md, visible=True)
835
+
836
+
837
def find_similar_lora(q: str):
    """Fuzzy-resolve a LoRA query to a local file, falling back to Civitai.

    First fuzzy-matches (rapidfuzz, cutoff 80) against private_lora_dict
    keys and version names; on a hit, ensures the file is local and copies
    it to the query-derived path. Otherwise searches Civitai (Pony/SDXL
    only) and downloads the top hit. Returns the resolved path or None.
    """
    from rapidfuzz.process import extractOne
    from rapidfuzz.utils import default_process
    query = to_lora_key(q)
    print(f"Finding <lora:{query}:...>...")
    keys = list(private_lora_dict.keys())
    values = [x[2] for x in list(private_lora_dict.values())]  # version names
    s = default_process(query)
    e1 = extractOne(s, keys + values, processor=default_process, score_cutoff=80.0)
    key = ""
    if e1:
        e = e1[0]
        # Map a value hit back to its dict key.
        if e in set(keys): key = e
        elif e in set(values): key = keys[values.index(e)]
    if key:
        path = to_lora_path(key)
        new_path = to_lora_path(query)
        if not Path(path).exists():
            if not Path(new_path).exists(): download_private_file_from_somewhere(path, True)
        if Path(path).exists() and copy_lora(path, new_path): return new_path
    print(f"Finding <lora:{query}:...> on Civitai...")
    civitai_query = Path(query).stem if Path(query).is_file() else query
    civitai_query = civitai_query.replace("_", " ").replace("-", " ")
    base_model = ["Pony", "SDXL 1.0"]
    items = search_lora_on_civitai(civitai_query, base_model, 1)
    if items:
        item = items[0]
        path = download_lora(item['dl_url'])
        new_path = query if Path(query).is_file() else to_lora_path(query)
        if path and copy_lora(path, new_path): return new_path
    return None
868
+
869
+
870
def change_interface_mode(mode: str):
    """Map an interface mode to the accordion/visibility updates of the UI.

    Returns ten gr.update objects (accordion open-states, component
    visibilities, and the normalized mode label) exactly as the UI wiring
    expects.
    """
    presets = {
        # mode: (o1, v1, o2, o3, v2, o4, v3, o5, v4, label)
        "Fast":   (False, True, False, False, True, False, True,  False, True,  "Fast"),
        "Simple": (True,  True, False, False, True, False, False, True,  False, "Standard"),  # t2i mode
        "LoRA":   (True,  True, True,  False, True, True,  True,  False, False, "Standard"),  # t2i LoRA mode
    }
    # Any unknown mode falls through to the Standard layout.
    standard = (False, True, False, False, True, False, True, False, True, "Standard")
    o1, v1, o2, o3, v2, o4, v3, o5, v4, label = presets.get(mode, standard)
    return (gr.update(open=o1), gr.update(visible=v1), gr.update(open=o2), gr.update(open=o3),
            gr.update(visible=v2), gr.update(open=o4), gr.update(visible=v3), gr.update(open=o5),
            gr.update(visible=v4), gr.update(value=label))
887
+
888
+
889
# Preset quality prompt/negative-prompt pairs selectable in the UI.
# Each entry: name shown in the dropdown, positive prefix, negative prefix.
quality_prompt_list = [
    {
        "name": "None",
        "prompt": "",
        "negative_prompt": "lowres",
    },
    {
        "name": "Animagine Common",
        "prompt": "anime artwork, anime style, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres",
        "negative_prompt": "lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
    },
    {
        "name": "Pony Anime Common",
        "prompt": "source_anime, score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres",
        "negative_prompt": "source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends",
    },
    {
        "name": "Pony Common",
        "prompt": "source_anime, score_9, score_8_up, score_7_up",
        "negative_prompt": "source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends",
    },
    {
        "name": "Animagine Standard v3.0",
        "prompt": "masterpiece, best quality",
        "negative_prompt": "lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name",
    },
    {
        "name": "Animagine Standard v3.1",
        "prompt": "masterpiece, best quality, very aesthetic, absurdres",
        "negative_prompt": "lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
    },
    {
        "name": "Animagine Light v3.1",
        "prompt": "(masterpiece), best quality, very aesthetic, perfect face",
        "negative_prompt": "(low quality, worst quality:1.2), very displeasing, 3d, watermark, signature, ugly, poorly drawn",
    },
    {
        "name": "Animagine Heavy v3.1",
        "prompt": "(masterpiece), (best quality), (ultra-detailed), very aesthetic, illustration, disheveled hair, perfect composition, moist skin, intricate details",
        "negative_prompt": "longbody, lowres, bad anatomy, bad hands, missing fingers, pubic hair, extra digit, fewer digits, cropped, worst quality, low quality, very displeasing",
    },
]
931
+
932
+
933
# Preset visual-style prompt/negative-prompt pairs selectable in the UI.
# Same shape as quality_prompt_list: name, positive prefix, negative prefix.
style_list = [
    {
        "name": "None",
        "prompt": "",
        "negative_prompt": "",
    },
    {
        "name": "Cinematic",
        "prompt": "cinematic still, emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
        "negative_prompt": "cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
    },
    {
        "name": "Photographic",
        "prompt": "cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed",
        "negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly",
    },
    {
        "name": "Anime",
        "prompt": "anime artwork, anime style, vibrant, studio anime, highly detailed",
        "negative_prompt": "photo, deformed, black and white, realism, disfigured, low contrast",
    },
    {
        "name": "Manga",
        "prompt": "manga style, vibrant, high-energy, detailed, iconic, Japanese comic style",
        "negative_prompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, Western comic style",
    },
    {
        "name": "Digital Art",
        "prompt": "concept art, digital artwork, illustrative, painterly, matte painting, highly detailed",
        "negative_prompt": "photo, photorealistic, realism, ugly",
    },
    {
        "name": "Pixel art",
        "prompt": "pixel-art, low-res, blocky, pixel art style, 8-bit graphics",
        "negative_prompt": "sloppy, messy, blurry, noisy, highly detailed, ultra textured, photo, realistic",
    },
    {
        "name": "Fantasy art",
        "prompt": "ethereal fantasy concept art, magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
        "negative_prompt": "photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white",
    },
    {
        "name": "Neonpunk",
        "prompt": "neonpunk style, cyberpunk, vaporwave, neon, vibes, vibrant, stunningly beautiful, crisp, detailed, sleek, ultramodern, magenta highlights, dark purple shadows, high contrast, cinematic, ultra detailed, intricate, professional",
        "negative_prompt": "painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured",
    },
    {
        "name": "3D Model",
        "prompt": "professional 3d model, octane render, highly detailed, volumetric, dramatic lighting",
        "negative_prompt": "ugly, deformed, noisy, low poly, blurry, painting",
    },
]
985
+
986
+
987
+ optimization_list = {
988
+ "None": [28, 7., 'Euler a', False, 'None', 1.],
989
+ "Default": [28, 7., 'Euler a', False, 'None', 1.],
990
+ "SPO": [28, 7., 'Euler a', True, 'loras/spo_sdxl_10ep_4k-data_lora_diffusers.safetensors', 1.],
991
+ "DPO": [28, 7., 'Euler a', True, 'loras/sdxl-DPO-LoRA.safetensors', 1.],
992
+ "DPO Turbo": [8, 2.5, 'LCM', True, 'loras/sd_xl_dpo_turbo_lora_v1-128dim.safetensors', 1.],
993
+ "SDXL Turbo": [8, 2.5, 'LCM', True, 'loras/sd_xl_turbo_lora_v1.safetensors', 1.],
994
+ "Hyper-SDXL 12step": [12, 5., 'TCD', True, 'loras/Hyper-SDXL-12steps-CFG-lora.safetensors', 1.],
995
+ "Hyper-SDXL 8step": [8, 5., 'TCD', True, 'loras/Hyper-SDXL-8steps-CFG-lora.safetensors', 1.],
996
+ "Hyper-SDXL 4step": [4, 0, 'TCD', True, 'loras/Hyper-SDXL-4steps-lora.safetensors', 1.],
997
+ "Hyper-SDXL 2step": [2, 0, 'TCD', True, 'loras/Hyper-SDXL-2steps-lora.safetensors', 1.],
998
+ "Hyper-SDXL 1step": [1, 0, 'TCD', True, 'loras/Hyper-SDXL-1steps-lora.safetensors', 1.],
999
+ "PCM 16step": [16, 4., 'Euler a trailing', True, 'loras/pcm_sdxl_normalcfg_16step_converted.safetensors', 1.],
1000
+ "PCM 8step": [8, 4., 'Euler a trailing', True, 'loras/pcm_sdxl_normalcfg_8step_converted.safetensors', 1.],
1001
+ "PCM 4step": [4, 2., 'Euler a trailing', True, 'loras/pcm_sdxl_smallcfg_4step_converted.safetensors', 1.],
1002
+ "PCM 2step": [2, 1., 'Euler a trailing', True, 'loras/pcm_sdxl_smallcfg_2step_converted.safetensors', 1.],
1003
+ }
1004
+
1005
+
1006
def set_optimization(opt, steps_gui, cfg_gui, sampler_gui, clip_skip_gui, lora_gui, lora_scale_gui):
    """Apply an optimization preset from ``optimization_list`` to the UI.

    Args:
        opt: Preset name; unknown names fall back to "None".
        steps_gui, cfg_gui, sampler_gui, clip_skip_gui, lora_gui, lora_scale_gui:
            Current UI values (steps/cfg are kept, raised to the defaults,
            for the presets that do not pin them).

    Returns:
        gr.update tuples for (steps, cfg, sampler, clip_skip, lora, lora_scale).
    """
    if opt not in optimization_list: opt = "None"
    def_steps_gui = 28
    def_cfg_gui = 7.
    # Unpack once. The previous code indexed `optimization_list.get(opt, "None")`
    # six separate times; with a missing key that string default would have been
    # indexed character-by-character and crashed.
    steps, cfg, sampler, clip_skip, lora, lora_scale = optimization_list[opt]
    if opt == "None":
        # Keep user values but never drop below the defaults.
        steps = max(steps_gui, def_steps_gui)
        cfg = max(cfg_gui, def_cfg_gui)
        clip_skip = clip_skip_gui
    elif opt in ("SPO", "DPO"):
        steps = max(steps_gui, def_steps_gui)
        cfg = max(cfg_gui, def_cfg_gui)

    return gr.update(value=steps), gr.update(value=cfg), gr.update(value=sampler),\
        gr.update(value=clip_skip), gr.update(value=lora), gr.update(value=lora_scale),
1026
+
1027
+
1028
# Sampler presets keyed by a "<genre> <aspect> <speed>" label.
# Each value is:
#   [sampler_gui, steps_gui, cfg_gui, clip_skip_gui, img_width_gui, img_height_gui, optimization_gui]
preset_sampler_setting = {
    "None": ["Euler a", 28, 7., True, 1024, 1024, "None"],
    "Anime 3:4 Fast": ["LCM", 8, 2.5, True, 896, 1152, "DPO Turbo"],
    "Anime 3:4 Standard": ["Euler a", 28, 7., True, 896, 1152, "None"],
    "Anime 3:4 Heavy": ["Euler a", 40, 7., True, 896, 1152, "None"],
    "Anime 1:1 Fast": ["LCM", 8, 2.5, True, 1024, 1024, "DPO Turbo"],
    "Anime 1:1 Standard": ["Euler a", 28, 7., True, 1024, 1024, "None"],
    "Anime 1:1 Heavy": ["Euler a", 40, 7., True, 1024, 1024, "None"],
    "Photo 3:4 Fast": ["LCM", 8, 2.5, False, 896, 1152, "DPO Turbo"],
    "Photo 3:4 Standard": ["DPM++ 2M Karras", 28, 7., False, 896, 1152, "None"],
    "Photo 3:4 Heavy": ["DPM++ 2M Karras", 40, 7., False, 896, 1152, "None"],
    "Photo 1:1 Fast": ["LCM", 8, 2.5, False, 1024, 1024, "DPO Turbo"],
    "Photo 1:1 Standard": ["DPM++ 2M Karras", 28, 7., False, 1024, 1024, "None"],
    "Photo 1:1 Heavy": ["DPM++ 2M Karras", 40, 7., False, 1024, 1024, "None"],
}
1044
+
1045
+
1046
def set_sampler_settings(sampler_setting):
    """Apply a ``preset_sampler_setting`` entry to the UI.

    Args:
        sampler_setting: Preset name; "None" or an unknown name resets
            the UI to the defaults.

    Returns:
        gr.update tuples for (sampler, steps, cfg, clip_skip, width, height,
        optimization).
    """
    if sampler_setting not in preset_sampler_setting or sampler_setting == "None":
        return gr.update(value="Euler a"), gr.update(value=28), gr.update(value=7.), gr.update(value=True),\
            gr.update(value=1024), gr.update(value=1024), gr.update(value="None")
    # Bug fix: the fallback list previously had only 6 elements, so v[6]
    # below would have raised IndexError had the .get() default been used.
    v = preset_sampler_setting.get(sampler_setting, ["Euler a", 28, 7., True, 1024, 1024, "None"])
    # sampler, steps, cfg, clip_skip, width, height, optimization
    return gr.update(value=v[0]), gr.update(value=v[1]), gr.update(value=v[2]), gr.update(value=v[3]),\
        gr.update(value=v[4]), gr.update(value=v[5]), gr.update(value=v[6])
1054
+
1055
+
1056
# Fast name -> (prompt, negative_prompt) lookups built from the preset tables.
preset_styles = {d["name"]: (d["prompt"], d["negative_prompt"]) for d in style_list}
preset_quality = {d["name"]: (d["prompt"], d["negative_prompt"]) for d in quality_prompt_list}
1058
+
1059
+
1060
def process_style_prompt(prompt: str, neg_prompt: str, styles_key: str = "None", quality_key: str = "None", type: str = "Auto"):
    """Merge the selected style/quality preset tags into the prompts.

    Any tag that could have been inserted by a previous preset choice is
    stripped first, so switching presets does not accumulate stale tags.

    Args:
        prompt, neg_prompt: Current prompt strings (comma-separated tags).
        styles_key, quality_key: Keys into ``preset_styles`` / ``preset_quality``.
        type: Model family ("Animagine", "Pony", "Auto", "None").

    Returns:
        gr.update tuples for (prompt, neg_prompt, type).
    """
    def to_list(s):
        # Bug fix: the old filter tested the whole string (`not s == ""`)
        # instead of each element, so empty items produced by double or
        # trailing commas survived into the tag lists.
        return [x.strip() for x in s.split(",") if x.strip()]

    def list_sub(a, b):
        return [e for e in a if e not in b]

    def list_uniq(l):
        return sorted(set(l), key=l.index)

    animagine_ps = to_list("anime artwork, anime style, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres")
    animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
    pony_ps = to_list("source_anime, score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
    pony_nps = to_list("source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
    prompts = to_list(prompt)
    neg_prompts = to_list(neg_prompt)

    # Every tag any style preset could have inserted.
    all_styles_ps = []
    all_styles_nps = []
    for d in style_list:
        all_styles_ps.extend(to_list(str(d.get("prompt", ""))))
        all_styles_nps.extend(to_list(str(d.get("negative_prompt", ""))))

    # Every tag any quality preset could have inserted.
    all_quality_ps = []
    all_quality_nps = []
    for d in quality_prompt_list:
        all_quality_ps.extend(to_list(str(d.get("prompt", ""))))
        all_quality_nps.extend(to_list(str(d.get("negative_prompt", ""))))

    quality_ps = to_list(preset_quality[quality_key][0])
    quality_nps = to_list(preset_quality[quality_key][1])
    styles_ps = to_list(preset_styles[styles_key][0])
    styles_nps = to_list(preset_styles[styles_key][1])

    # Strip everything preset-inserted before re-adding the current choice.
    prompts = list_sub(prompts, animagine_ps + pony_ps + all_styles_ps + all_quality_ps)
    neg_prompts = list_sub(neg_prompts, animagine_nps + pony_nps + all_styles_nps + all_quality_nps)

    # A trailing "" makes the joined string end with ", " for later appends.
    last_empty_p = [""] if not prompts and type != "None" and type != "Auto" and styles_key != "None" and quality_key != "None" else []
    last_empty_np = [""] if not neg_prompts and type != "None" and type != "Auto" and styles_key != "None" and quality_key != "None" else []

    if type == "Animagine":
        prompts = prompts + animagine_ps
        neg_prompts = neg_prompts + animagine_nps
    elif type == "Pony":
        prompts = prompts + pony_ps
        neg_prompts = neg_prompts + pony_nps

    prompts = prompts + styles_ps + quality_ps
    neg_prompts = neg_prompts + styles_nps + quality_nps

    prompt = ", ".join(list_uniq(prompts) + last_empty_p)
    neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)

    return gr.update(value=prompt), gr.update(value=neg_prompt), gr.update(value=type)
1114
+
1115
+
1116
def set_quick_presets(genre: str = "None", type: str = "Auto", speed: str = "None", aspect: str = "None"):
    """Translate quick-preset choices into quality/style/sampler/optimization names.

    Args:
        genre: "Anime", "Photo", or "None".
        type: Model family ("Pony", "Animagine", "Auto", "None").
        speed: "Fast", "Heavy", or anything else for standard speed.
        aspect: "1:1" or "3:4" (anything else leaves the sampler untouched).

    Returns:
        gr.update tuples for (quality, style, sampler, optimization, type).
    """
    quality = "None"
    style = "None"
    sampler = "None"
    opt = "None"
    # Preset names follow the "<Genre> <aspect> <Speed>" convention of
    # preset_sampler_setting, so build them instead of enumerating branches.
    speed_label = "Heavy" if speed == "Heavy" else ("Fast" if speed == "Fast" else "Standard")

    if genre == "Anime":
        if type != "None" and type != "Auto": style = "Anime"
        if aspect in ("1:1", "3:4"):
            sampler = f"Anime {aspect} {speed_label}"
        if type == "Pony":
            quality = "Pony Anime Common"
        elif type == "Animagine":
            quality = "Animagine Common"
        else:
            quality = "None"
    elif genre == "Photo":
        if type != "None" and type != "Auto": style = "Photographic"
        if aspect in ("1:1", "3:4"):
            sampler = f"Photo {aspect} {speed_label}"
        quality = "Pony Common" if type == "Pony" else "None"

    if speed == "Fast":
        opt = "DPO Turbo"
        if genre == "Anime" and type != "Pony" and type != "Auto": quality = "Animagine Light v3.1"

    return gr.update(value=quality), gr.update(value=style), gr.update(value=sampler), gr.update(value=opt), gr.update(value=type)
1170
+
1171
+
1172
# Optional mapping of embedding filename -> [trigger token, is_positive];
# missing or unreadable file simply leaves the dict empty.
textual_inversion_dict = {}
try:
    with open('textual_inversion_dict.json', encoding='utf-8') as f:
        textual_inversion_dict = json.load(f)
except Exception:
    pass
# Trigger tokens registered so far by get_tupled_embed_list().
textual_inversion_file_token_list = []
1179
+
1180
+
1181
def get_tupled_embed_list(embed_list):
    """Return (trigger token, filepath) pairs for the given embedding files.

    The token comes from ``textual_inversion_dict`` when the filename is
    known, otherwise from the file stem. Each token is also recorded in the
    module-level ``textual_inversion_file_token_list``.
    """
    # Bug fix: the previous `global textual_inversion_file_list` declared a
    # name that does not exist anywhere in the module; mutating the real list
    # via .append() needs no global declaration at all.
    tupled_list = []
    for file in embed_list:
        token = textual_inversion_dict.get(Path(file).name, [Path(file).stem.replace(",", ""), False])[0]
        tupled_list.append((token, file))
        textual_inversion_file_token_list.append(token)
    return tupled_list
1189
+
1190
+
1191
def set_textual_inversion_prompt(textual_inversion_gui, prompt_gui, neg_prompt_gui, prompt_syntax_gui):
    """Insert trigger tokens of the selected textual-inversion files.

    Existing TI tokens are removed from both prompts first, then the tokens
    of the currently selected files are appended — to the positive prompt if
    the file is flagged positive (or lives in a "positive" folder), to the
    negative prompt otherwise.

    Returns:
        gr.update tuples for (prompt, neg_prompt).
    """
    ti_tags = list(textual_inversion_dict.values()) + textual_inversion_file_token_list

    def strip_ti(text):
        # Keep only non-empty tags that are not TI trigger tokens.
        items = text.split(",") if text else []
        return [t for t in (str(x).strip() for x in items) if t and t not in ti_tags]

    prompts = strip_ti(prompt_gui)
    neg_prompts = strip_ti(neg_prompt_gui)

    ti_prompts = []
    ti_neg_prompts = []
    for ti in textual_inversion_gui:
        tokens = textual_inversion_dict.get(Path(ti).name, [Path(ti).stem.replace(",", ""), False])
        if tokens[1] == True or "positive" in Path(ti).parent.name:
            ti_prompts.append(tokens[0])      # positive prompt
        else:
            ti_neg_prompts.append(tokens[0])  # negative prompt (default)

    # Trailing "" makes the joined string end with ", ".
    prompt = ", ".join(prompts + ti_prompts + [""])
    neg_prompt = ", ".join(neg_prompts + ti_neg_prompts + [""])
    return gr.update(value=prompt), gr.update(value=neg_prompt),
1218
+
1219
+
1220
def get_model_pipeline(repo_id: str):
    """Best-effort detection of the diffusers pipeline class for a Hub repo.

    Returns one of "FluxPipeline", "StableDiffusionXLPipeline" or the
    default "StableDiffusionPipeline"; the default is also used for any
    invalid, private, gated, or non-diffusers repo, or on API failure.
    """
    from huggingface_hub import HfApi
    default = "StableDiffusionPipeline"
    api = HfApi()
    try:
        if " " in repo_id or not api.repo_exists(repo_id):
            return default
        model = api.model_info(repo_id=repo_id)
    except Exception:
        return default
    if model.private or model.gated:
        return default
    tags = model.tags
    if 'diffusers' not in tags:
        return default
    # First matching pipeline tag wins, in priority order.
    for tag, pipeline in (('diffusers:FluxPipeline', "FluxPipeline"),
                          ('diffusers:StableDiffusionXLPipeline', "StableDiffusionXLPipeline"),
                          ('diffusers:StableDiffusionPipeline', "StableDiffusionPipeline")):
        if tag in tags:
            return pipeline
    return default
1240
+
packages.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ git-lfs aria2 ffmpeg
pre-requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ pip>=23.0.0
requirements.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ spaces
2
+ accelerate
3
+ diffusers
4
+ invisible_watermark
5
+ transformers
6
+ xformers
7
+ git+https://github.com/R3gm/stablepy.git
8
+ torch==2.2.0
9
+ gdown
10
+ opencv-python
11
+ yt-dlp
12
+ huggingface_hub
13
+ scikit-build-core
14
+ https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.90-cu124/llama_cpp_python-0.2.90-cp310-cp310-linux_x86_64.whl
15
+ git+https://github.com/Maximilian-Winter/llama-cpp-agent
16
+ pybind11>=2.12
17
+ rapidfuzz
18
+ torchvision
19
+ optimum[onnxruntime]
20
+ dartrs
21
+ translatepy
22
+ timm
23
+ wrapt-timeout-decorator
24
+ sentencepiece
tagger/character_series_dict.csv ADDED
The diff for this file is too large to render. See raw diff
 
tagger/danbooru_e621.csv ADDED
The diff for this file is too large to render. See raw diff
 
tagger/fl2sd3longcap.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import AutoProcessor, AutoModelForCausalLM
2
+ import spaces
3
+ import re
4
+ from PIL import Image
5
+ import torch
6
+
7
+ import subprocess
8
+ subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
9
+
10
+ device = "cuda" if torch.cuda.is_available() else "cpu"
11
+ fl_model = AutoModelForCausalLM.from_pretrained('gokaygokay/Florence-2-SD3-Captioner', trust_remote_code=True).to("cpu").eval()
12
+ fl_processor = AutoProcessor.from_pretrained('gokaygokay/Florence-2-SD3-Captioner', trust_remote_code=True)
13
+
14
+
15
def fl_modify_caption(caption: str) -> str:
    """Strip a 'captured from ' / 'captured at ' phrase from a caption.

    The first case-insensitive occurrence is removed; a caption without
    either phrase is returned unchanged.

    Args:
        caption (str): A string containing a caption.
    Returns:
        str: The caption with the phrase removed, or the original caption.
    """
    prefix_substrings = [
        ('captured from ', ''),
        ('captured at ', ''),
    ]
    # One alternation pattern covering every phrase, plus a lowercase
    # lookup table for the replacement text.
    pattern = '|'.join(re.escape(opening) for opening, _ in prefix_substrings)
    replacers = {opening.lower(): replacement for opening, replacement in prefix_substrings}

    modified_caption = re.sub(
        pattern,
        lambda m: replacers[m.group(0).lower()],
        caption,
        count=1,
        flags=re.IGNORECASE,
    )
    return modified_caption if modified_caption != caption else caption
42
+
43
+
44
@spaces.GPU(duration=30)
def fl_run_example(image):
    """Caption an image with Florence-2-SD3-Captioner.

    Moves the model to the GPU for generation and back to the CPU
    afterwards; returns the cleaned caption string.
    """
    task_prompt = "<DESCRIPTION>"
    prompt = task_prompt + "Describe this image in great detail."

    # Ensure the image is in RGB mode
    if image.mode != "RGB":
        image = image.convert("RGB")

    try:
        fl_model.to(device)
        inputs = fl_processor(text=prompt, images=image, return_tensors="pt").to(device)
        generated_ids = fl_model.generate(
            input_ids=inputs["input_ids"],
            pixel_values=inputs["pixel_values"],
            max_new_tokens=1024,
            num_beams=3
        )
    finally:
        # Bug fix: always park the model back on the CPU, even when
        # generation raises, so a failed call does not pin GPU memory.
        fl_model.to("cpu")
    generated_text = fl_processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
    parsed_answer = fl_processor.post_process_generation(generated_text, task=task_prompt, image_size=(image.width, image.height))
    return fl_modify_caption(parsed_answer["<DESCRIPTION>"])
65
+
66
+
67
def predict_tags_fl2_sd3(image: Image.Image, input_tags: str, algo: list[str]):
    """Merge Florence-2 caption output into an existing tag string.

    Args:
        image: Image to caption.
        input_tags: Existing comma-separated tags.
        algo: Selected algorithms; a no-op unless
            "Use Florence-2-SD3-Long-Captioner" is present.

    Returns:
        The merged, de-duplicated comma-separated tag string.
    """
    def to_list(s):
        # Bug fix: filter each element instead of the whole string, so
        # empty items never enter the list. This also removes the need for
        # the fragile `tag_list.remove("")`, which raised ValueError
        # whenever "" happened to be absent.
        return [x.strip() for x in s.split(",") if x.strip()]

    def list_uniq(l):
        return sorted(set(l), key=l.index)

    if "Use Florence-2-SD3-Long-Captioner" not in algo:
        return input_tags
    tag_list = list_uniq(to_list(input_tags) + to_list(fl_run_example(image)))
    return ", ".join(tag_list)
tagger/output.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from dataclasses import dataclass


@dataclass
class UpsamplingOutput:
    """Result of one tag-upsampling run."""

    upsampled_tags: str  # the complete upsampled tag string

    # Tag categories extracted from the upsampled result.
    copyright_tags: str
    character_tags: str
    general_tags: str
    rating_tag: str
    aspect_ratio_tag: str
    length_tag: str
    identity_tag: str

    elapsed_time: float = 0.0  # wall-clock seconds spent upsampling
tagger/tag_group.csv ADDED
The diff for this file is too large to render. See raw diff
 
tagger/tagger.py ADDED
@@ -0,0 +1,556 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import spaces
2
+ from PIL import Image
3
+ import torch
4
+ import gradio as gr
5
+ from transformers import AutoImageProcessor, AutoModelForImageClassification
6
+ from pathlib import Path
7
+
8
+
9
+ WD_MODEL_NAMES = ["p1atdev/wd-swinv2-tagger-v3-hf"]
10
+ WD_MODEL_NAME = WD_MODEL_NAMES[0]
11
+
12
+ device = "cuda" if torch.cuda.is_available() else "cpu"
13
+ default_device = device
14
+
15
+ try:
16
+ wd_model = AutoModelForImageClassification.from_pretrained(WD_MODEL_NAME, trust_remote_code=True).to(default_device).eval()
17
+ wd_processor = AutoImageProcessor.from_pretrained(WD_MODEL_NAME, trust_remote_code=True)
18
+ except Exception as e:
19
+ print(e)
20
+ wd_model = wd_processor = None
21
+
22
+ def _people_tag(noun: str, minimum: int = 1, maximum: int = 5):
23
+ return (
24
+ [f"1{noun}"]
25
+ + [f"{num}{noun}s" for num in range(minimum + 1, maximum + 1)]
26
+ + [f"{maximum+1}+{noun}s"]
27
+ )
28
+
29
+
30
+ PEOPLE_TAGS = (
31
+ _people_tag("girl") + _people_tag("boy") + _people_tag("other") + ["no humans"]
32
+ )
33
+
34
+
35
# Danbooru-style rating -> prompt rating tags.
RATING_MAP = {
    "sfw": "safe",
    "general": "safe",
    "sensitive": "sensitive",
    "questionable": "nsfw",
    "explicit": "explicit, nsfw",
}
# Danbooru-style rating -> e621 rating tags.
DANBOORU_TO_E621_RATING_MAP = {
    "sfw": "rating_safe",
    "general": "rating_safe",
    "safe": "rating_safe",
    "sensitive": "rating_safe",
    "nsfw": "rating_explicit",
    "explicit, nsfw": "rating_explicit",
    "explicit": "rating_explicit",
    "rating:safe": "rating_safe",
    "rating:general": "rating_safe",
    "rating:sensitive": "rating_safe",
    "rating:questionable, nsfw": "rating_explicit",
    "rating:explicit, nsfw": "rating_explicit",
}


# Tags whose underscores are meaningful (emoticons) and must be kept as-is.
# https://github.com/toriato/stable-diffusion-webui-wd14-tagger/blob/a9eacb1eff904552d3012babfa28b57e1d3e295c/tagger/ui.py#L368
kaomojis = [
    "0_0",
    "(o)_(o)",
    "+_+",
    "+_-",
    "._.",
    "<o>_<o>",
    "<|>_<|>",
    "=_=",
    ">_<",
    "3_3",
    "6_9",
    ">_o",
    "@_@",
    "^_^",
    "o_o",
    "u_u",
    "x_x",
    "|_|",
    "||_||",
]


def replace_underline(x: str):
    """Turn 'long_hair' into 'long hair', leaving kaomoji tags untouched.

    NOTE: membership is tested against the *raw* (unstripped) string,
    matching the original behavior.
    """
    if x in kaomojis:
        return x.strip()
    return x.strip().replace("_", " ")
84
+
85
+
86
def to_list(s):
    """Split a comma-separated string into stripped, non-empty tags.

    Bug fix: the previous filter tested the whole string (`if not s == ""`)
    instead of each element, so double or trailing commas produced empty
    tags in the result.
    """
    return [x.strip() for x in s.split(",") if x.strip()]
88
+
89
+
90
def list_sub(a, b):
    """Return the items of *a* that do not occur in *b*, order preserved."""
    return [item for item in a if item not in b]


def list_uniq(l):
    """De-duplicate *l* while keeping first-occurrence order."""
    return list(dict.fromkeys(l))
96
+
97
+
98
def load_dict_from_csv(filename):
    """Load a two-column CSV into a ``{first_field: second_field}`` mapping.

    Falls back to ``./tagger/<filename>`` when the file is not found in the
    working directory. Returns an empty dict on any failure.
    """
    mapping = {}  # renamed: the old code shadowed the builtin `dict`
    path = Path(filename)
    if not path.exists():
        alt = Path('./tagger/', filename)
        if alt.exists():
            path = alt
        else:
            return mapping
    try:
        with open(path, 'r', encoding="utf-8") as f:
            lines = f.readlines()
    except Exception:
        # Bug fix: the old message printed the literal "(unknown)" instead
        # of the filename, making failures impossible to diagnose.
        print(f"Failed to open dictionary file: {path}")
        return mapping
    for line in lines:
        parts = line.strip().split(',')
        # Skip malformed lines instead of raising IndexError on parts[1].
        if len(parts) >= 2:
            mapping[parts[0]] = parts[1]
    return mapping


anime_series_dict = load_dict_from_csv('character_series_dict.csv')
116
+
117
+
118
def character_list_to_series_list(character_list):
    """Map character tags to their series tags.

    The series comes from ``anime_series_dict``; a trailing "(series)"
    suffix embedded in the tag itself takes precedence. Tags with no
    resolvable series are dropped.
    """
    series_dict = anime_series_dict
    output_series_tag = []
    for tag in character_list:
        series_tag = series_dict.get(tag, "")
        if tag.endswith(")"):
            # Prefer the series embedded in "character (series)".
            series_tag = tag.split("(")[-1].replace(")", "")
        if series_tag:
            output_series_tag.append(series_tag)
    return output_series_tag
135
+
136
+
137
def select_random_character(series: str, character: str):
    """Pick a random character from ``anime_series_dict``.

    Args:
        series, character: Current values, returned unchanged when the
            dictionary is empty.

    Returns:
        (series, character) for the randomly chosen character.
    """
    from random import choice
    character_list = list(anime_series_dict.keys())
    if not character_list:
        # Guard: the old randrange() would raise on an empty dictionary.
        return series, character
    # Bug fix: random.choice covers every entry; the previous
    # randrange(len(list) - 1) could never select the last character.
    character = choice(character_list)
    series = anime_series_dict.get(character.split(",")[0].strip(), "")
    return series, character
144
+
145
+
146
def danbooru_to_e621(dtag, e621_dict):
    """Rewrite up to two word-groups of a Danbooru tag to their e621 form.

    Words with no mapping in *e621_dict* are kept as-is.
    """
    import re

    def lookup(match):
        word = match.group(0)
        # Fall back to the original word when no mapping exists.
        return e621_dict.get(replace_underline(word), "") or word

    return re.sub(r'[\w ]+', lookup, dtag, 2)
158
+
159
+
160
+ danbooru_to_e621_dict = load_dict_from_csv('danbooru_e621.csv')
161
+
162
+
163
def convert_danbooru_to_e621_prompt(input_prompt: str = "", prompt_type: str = "danbooru"):
    """Convert a Danbooru-style prompt to e621 style.

    People-count tags are moved to the front, at most one rating tag is kept
    at the end, and everything else is mapped through the e621 dictionary.
    No-op when *prompt_type* is "danbooru".
    """
    if prompt_type == "danbooru":
        return input_prompt

    people_tags: list[str] = []
    other_tags: list[str] = []
    rating_tags: list[str] = []

    e621_dict = danbooru_to_e621_dict
    for tag in (input_prompt.split(",") if input_prompt else []):
        tag = danbooru_to_e621(replace_underline(tag), e621_dict)
        if tag in PEOPLE_TAGS:
            people_tags.append(tag)
        elif tag in DANBOORU_TO_E621_RATING_MAP.keys():
            # NOTE: spaces are stripped before the lookup, exactly as in the
            # original; only single-word rating keys are reachable here.
            rating_tags.append(DANBOORU_TO_E621_RATING_MAP.get(tag.replace(" ", ""), ""))
        else:
            other_tags.append(tag)

    # Keep only the first distinct rating tag.
    rating_tags = sorted(set(rating_tags), key=rating_tags.index)[:1]
    if rating_tags and rating_tags[0] == "explicit":
        rating_tags = ["explicit, nsfw"]

    return ", ".join(people_tags + other_tags + rating_tags)
188
+
189
+
190
+ from translatepy import Translator
191
+ translator = Translator()
192
+ def translate_prompt_old(prompt: str = ""):
193
+ def translate_to_english(input: str):
194
+ try:
195
+ output = str(translator.translate(input, 'English'))
196
+ except Exception as e:
197
+ output = input
198
+ print(e)
199
+ return output
200
+
201
+ def is_japanese(s):
202
+ import unicodedata
203
+ for ch in s:
204
+ name = unicodedata.name(ch, "")
205
+ if "CJK UNIFIED" in name or "HIRAGANA" in name or "KATAKANA" in name:
206
+ return True
207
+ return False
208
+
209
+ def to_list(s):
210
+ return [x.strip() for x in s.split(",")]
211
+
212
+ prompts = to_list(prompt)
213
+ outputs = []
214
+ for p in prompts:
215
+ p = translate_to_english(p) if is_japanese(p) else p
216
+ outputs.append(p)
217
+
218
+ return ", ".join(outputs)
219
+
220
+
221
def translate_prompt(input: str):
    """Translate *input* to English; on any failure return it unchanged."""
    try:
        return str(translator.translate(input, 'English'))
    except Exception as e:
        print(e)
        return input
228
+
229
+
230
def translate_prompt_to_ja(prompt: str = ""):
    """Translate non-Japanese items of a comma-separated prompt to Japanese.

    Items that already contain CJK/kana characters are left unchanged;
    translation failures fall back to the original item.
    """
    def translate_to_japanese(text: str):
        try:
            return str(translator.translate(text, 'Japanese'))
        except Exception as e:
            print(e)
            return text

    def is_japanese(s):
        import unicodedata
        return any(
            "CJK UNIFIED" in unicodedata.name(ch, "")
            or "HIRAGANA" in unicodedata.name(ch, "")
            or "KATAKANA" in unicodedata.name(ch, "")
            for ch in s
        )

    items = [x.strip() for x in prompt.split(",")]
    return ", ".join(p if is_japanese(p) else translate_to_japanese(p) for p in items)
257
+
258
+
259
def tags_to_ja(itag, dict):
    """Replace up to two word-groups of *itag* with Japanese from *dict*.

    Words with no translation are kept as-is. (The parameter is named
    ``dict`` for backward compatibility with existing callers.)
    """
    import re

    def lookup(match):
        word = match.group(0)
        return dict.get(replace_underline(word), "") or word

    return re.sub(r'[\w ]+', lookup, itag, 2)


def convert_tags_to_ja(input_prompt: str = ""):
    """Translate each comma-separated tag to Japanese via all_tags_ja_ext.csv."""
    ja_dict = load_dict_from_csv('all_tags_ja_ext.csv')
    tags = input_prompt.split(",") if input_prompt else []
    return ", ".join(tags_to_ja(replace_underline(tag), ja_dict) for tag in tags)
286
+
287
+
288
# When True, insert_model_recom_prompt() may add per-model tags automatically.
enable_auto_recom_prompt = True


# Recommended quality tags per model family.
animagine_ps = to_list("masterpiece, best quality, very aesthetic, absurdres")
animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
pony_ps = to_list("score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
pony_nps = to_list("source_pony, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
other_ps = to_list("anime artwork, anime style, studio anime, highly detailed, cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed")
other_nps = to_list("photo, deformed, black and white, realism, disfigured, low contrast, drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly")
default_ps = to_list("highly detailed, masterpiece, best quality, very aesthetic, absurdres")
default_nps = to_list("score_6, score_5, score_4, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
def insert_recom_prompt(prompt: str = "", neg_prompt: str = "", type: str = "None"):
    """Insert family-recommended quality tags into the prompts.

    Strips any previously inserted family tags first; "Auto" enables the
    per-model automatic insertion instead of adding tags here.

    Returns:
        (prompt, neg_prompt) strings.
    """
    global enable_auto_recom_prompt
    prompts = list_sub(to_list(prompt), animagine_ps + pony_ps)
    neg_prompts = list_sub(to_list(neg_prompt), animagine_nps + pony_nps)

    # Trailing "" makes the joined string end with ", " for later appends.
    last_empty_p = [""] if not prompts and type != "None" else []
    last_empty_np = [""] if not neg_prompts and type != "None" else []

    enable_auto_recom_prompt = (type == "Auto")
    if type == "Animagine":
        prompts = prompts + animagine_ps
        neg_prompts = neg_prompts + animagine_nps
    elif type == "Pony":
        prompts = prompts + pony_ps
        neg_prompts = neg_prompts + pony_nps

    prompt = ", ".join(list_uniq(prompts) + last_empty_p)
    neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
    return prompt, neg_prompt
325
+
326
+
327
def load_model_prompt_dict():
    """Load per-model recommended prompts from model_dict.json.

    Looks in the working directory first, then in ./tagger/. Returns an
    empty dict when the file is missing or unreadable.
    """
    import json
    d = {}
    path = 'model_dict.json' if Path('model_dict.json').exists() else './tagger/model_dict.json'
    try:
        # Bug fix: the old code computed `path` but then opened the literal
        # 'model_dict.json', so the ./tagger/ fallback was never used.
        with open(path, encoding='utf-8') as f:
            d = json.load(f)
    except Exception:
        pass
    return d


model_prompt_dict = load_model_prompt_dict()
340
+
341
+
342
def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name: str = "None"):
    """Append the model's recommended tags when auto-recommendation is on.

    Uses ``model_prompt_dict`` for known models, otherwise the defaults.
    No-op when *model_name* is falsy or auto-recommendation is disabled.

    Returns:
        (prompt, neg_prompt) strings.
    """
    if not model_name or not enable_auto_recom_prompt:
        return prompt, neg_prompt
    prompts = list_sub(to_list(prompt), animagine_ps + pony_ps + other_ps)
    neg_prompts = list_sub(to_list(neg_prompt), animagine_nps + pony_nps + other_nps)
    # Bug fix: the old condition read `... and type != "None"`, comparing the
    # *builtin* `type` (there is no `type` parameter here) to a string —
    # always true. The intent is simply "pad with '' when empty" so the
    # joined prompt ends with ", ".
    last_empty_p = [""] if not prompts else []
    last_empty_np = [""] if not neg_prompts else []
    if model_name in model_prompt_dict:
        ps = to_list(model_prompt_dict[model_name]["prompt"])
        nps = to_list(model_prompt_dict[model_name]["negative_prompt"])
    else:
        ps = default_ps
        nps = default_nps
    prompts = prompts + ps
    neg_prompts = neg_prompts + nps
    prompt = ", ".join(list_uniq(prompts) + last_empty_p)
    neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
    return prompt, neg_prompt
363
+
364
+
365
+ tag_group_dict = load_dict_from_csv('tag_group.csv')
366
+
367
+
368
def remove_specific_prompt(input_prompt: str = "", keep_tags: str = "all"):
    """Filter a tag prompt down to the requested tag groups.

    Args:
        input_prompt: Comma-separated tag string.
        keep_tags: "all" (no filtering), "body", or "dress" — selects which
            Danbooru tag groups survive; clothing tags are additionally
            dropped for "body", and background-ish tags are always dropped.

    Returns:
        The filtered prompt (people-count tags first).
    """
    if keep_tags == "all":
        return input_prompt

    import re
    # Compiled once instead of per-tag.
    dressed_re = re.compile(r'dress|cloth|uniform|costume|vest|sweater|coat|shirt|jacket|blazer|apron|leotard|hood|sleeve|skirt|shorts|pant|loafer|ribbon|necktie|bow|collar|glove|sock|shoe|boots|wear|emblem')
    background_re = re.compile(r'background|outline|light|sky|build|day|screen|tree|city')

    un_tags = ['solo']
    group_list = ['groups', 'body_parts', 'attire', 'posture', 'objects', 'creatures', 'locations', 'disambiguation_pages', 'commonly_misused_tags', 'phrases', 'verbs_and_gerunds', 'subjective', 'nudity', 'sex_objects', 'sex', 'sex_acts', 'image_composition', 'artistic_license', 'text', 'year_tags', 'metatags']
    keep_group_dict = {
        "body": ['groups', 'body_parts'],
        "dress": ['groups', 'body_parts', 'attire'],
        "all": group_list,
    }
    keep_group = keep_group_dict.get(keep_tags, keep_group_dict["body"])
    # Groups NOT kept (symmetric difference with the full list).
    explicit_group = set(group_list) ^ set(keep_group)

    group_dict = tag_group_dict

    def is_necessary(tag):
        if tag in un_tags or group_dict.get(tag, "") in explicit_group:
            return False
        if keep_tags == "body" and dressed_re.search(tag):
            return False
        if background_re.search(tag):
            return False
        return True

    people_tags: list[str] = []
    other_tags: list[str] = []
    for tag in (input_prompt.split(",") if input_prompt else []):
        tag = replace_underline(tag)
        if tag in PEOPLE_TAGS:
            people_tags.append(tag)
        elif is_necessary(tag):
            other_tags.append(tag)

    return ", ".join(people_tags + other_tags)
418
+
419
+
420
def sort_taglist(tags: list[str]):
    """Order tags as: character, series, people, grouped-by-category, other, rating.

    Only the first rating tag survives; "explicit" is expanded to
    "explicit, nsfw" for e621-style checkpoints.
    """
    if not tags: return []

    characters: list[str] = []
    series: list[str] = []
    people: list[str] = []
    grouped: dict = {}
    others: list[str] = []
    ratings: list[str] = []

    group_order = ['groups', 'body_parts', 'attire', 'posture', 'objects', 'creatures', 'locations', 'disambiguation_pages', 'commonly_misused_tags', 'phrases', 'verbs_and_gerunds', 'subjective', 'nudity', 'sex_objects', 'sex', 'sex_acts', 'image_composition', 'artistic_license', 'text', 'year_tags', 'metatags']

    group_lookup = tag_group_dict
    known_groups = set(group_lookup.keys())
    known_characters = set(anime_series_dict.keys())
    known_series = set(anime_series_dict.values())
    known_ratings = set(DANBOORU_TO_E621_RATING_MAP.keys()) | set(DANBOORU_TO_E621_RATING_MAP.values())

    for raw in tags:
        tag = replace_underline(raw)
        if tag in PEOPLE_TAGS:
            people.append(tag)
        elif tag in known_ratings:
            ratings.append(tag)
        elif tag in known_groups:
            grouped.setdefault(group_lookup[tag], []).append(tag)
        elif tag in known_characters:
            characters.append(tag)
        elif tag in known_series:
            series.append(tag)
        else:
            others.append(tag)

    # Flatten the per-group buckets in the canonical group order.
    ordered_group_tags: list[str] = []
    for name in group_order:
        ordered_group_tags.extend(grouped.get(name, []))

    if ratings:
        ratings = ["explicit, nsfw"] if ratings[0] == "explicit" else [ratings[0]]

    return characters + series + people + ordered_group_tags + others + ratings
462
+
463
+
464
def sort_tags(tags: str):
    """Split a comma-separated tag string, reorder it via sort_taglist(), and re-join."""
    if not tags: return ""
    cleaned = [part.strip() for part in tags.split(",")]
    cleaned = [part for part in cleaned if part]
    return ", ".join(sort_taglist(cleaned))
471
+
472
+
473
def postprocess_results(results: dict[str, float], general_threshold: float, character_threshold: float):
    """Split raw tagger scores into (rating, character, general) dicts.

    Entries are sorted by descending score; character and general tags below
    their respective thresholds are dropped, rating tags are kept as-is.
    """
    rating: dict[str, float] = {}
    character: dict[str, float] = {}
    general: dict[str, float] = {}

    for label, score in sorted(results.items(), key=lambda kv: kv[1], reverse=True):
        if label.startswith("rating:"):
            rating[label.replace("rating:", "")] = score
        elif label.startswith("character:"):
            if score >= character_threshold:
                character[label.replace("character:", "")] = score
        elif score >= general_threshold:
            general[label] = score

    return rating, character, general
496
+
497
+
498
def gen_prompt(rating: list[str], character: list[str], general: list[str]):
    """Join general tags into one prompt string, people-count tags first.

    NOTE(review): `rating_tag` is computed but never used, and `rating[0]`
    raises IndexError when `rating` is empty — confirm whether the rating tag
    was meant to be appended to the returned prompt. `character` is also
    unused here.
    """
    people_tags: list[str] = []
    other_tags: list[str] = []
    rating_tag = RATING_MAP[rating[0]]

    for tag in general:
        if tag in PEOPLE_TAGS:
            people_tags.append(tag)
        else:
            other_tags.append(tag)

    # People-count tags lead the prompt.
    all_tags = people_tags + other_tags

    return ", ".join(all_tags)
512
+
513
+
514
@spaces.GPU(duration=30)
def predict_tags(image: Image.Image, general_threshold: float = 0.3, character_threshold: float = 0.8):
    """Tag an image with the WD tagger model.

    Returns (series_tag, character_csv, prompt, gr.update) for the UI; the
    gr.update re-enables the downstream button.
    """
    inputs = wd_processor.preprocess(image, return_tensors="pt")

    outputs = wd_model(**inputs.to(wd_model.device, wd_model.dtype))
    logits = torch.sigmoid(outputs.logits[0])  # take the first logits

    # get probabilities
    # NOTE(review): the model is moved to `device` only after the forward pass
    # above has already run, then moved back — confirm whether this move was
    # meant to happen before inference.
    if device != default_device: wd_model.to(device=device)
    results = {
        wd_model.config.id2label[i]: float(logit.float()) for i, logit in enumerate(logits)
    }
    if device != default_device: wd_model.to(device=default_device)
    # rating, character, general
    rating, character, general = postprocess_results(
        results, general_threshold, character_threshold
    )
    prompt = gen_prompt(
        list(rating.keys()), list(character.keys()), list(general.keys())
    )
    output_series_tag = ""
    # Map detected characters back to their series; keep the first match only.
    output_series_list = character_list_to_series_list(character.keys())
    if output_series_list:
        output_series_tag = output_series_list[0]
    else:
        output_series_tag = ""
    return output_series_tag, ", ".join(character.keys()), prompt, gr.update(interactive=True)
541
+
542
+
543
def predict_tags_wd(image: Image.Image, input_tags: str, algo: list[str], general_threshold: float = 0.3,
                    character_threshold: float = 0.8, input_series: str = "", input_character: str = ""):
    """Run the WD tagger only when it is selected in `algo`; otherwise pass the inputs through."""
    if algo and "Use WD Tagger" not in algo:
        # Tagger disabled: echo the current field values unchanged.
        return input_series, input_character, input_tags, gr.update(interactive=True)
    return predict_tags(image, general_threshold, character_threshold)
548
+
549
+
550
def compose_prompt_to_copy(character: str, series: str, general: str):
    """Concatenate character, series and general tag strings into one comma-joined prompt."""
    parts: list[str] = []
    for text in (character, series, general):
        if text:
            parts.extend(text.split(","))
    return ",".join(parts)
tagger/utils.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from dartrs.v2 import AspectRatioTag, LengthTag, RatingTag, IdentityTag
3
+
4
+
5
# Option lists for the Dart v2 upsampler UI dropdowns; the string values are
# the literal control tags the dartrs.v2 models expect.
V2_ASPECT_RATIO_OPTIONS: list[AspectRatioTag] = [
    "ultra_wide",
    "wide",
    "square",
    "tall",
    "ultra_tall",
]
V2_RATING_OPTIONS: list[RatingTag] = [
    "sfw",
    "general",
    "sensitive",
    "nsfw",
    "questionable",
    "explicit",
]
V2_LENGTH_OPTIONS: list[LengthTag] = [
    "very_short",
    "short",
    "medium",
    "long",
    "very_long",
]
V2_IDENTITY_OPTIONS: list[IdentityTag] = [
    "none",
    "lax",
    "strict",
]
32
+
33
+
34
# ref: https://qiita.com/tregu148/items/fccccbbc47d966dd2fc2
def gradio_copy_text(_text: None):
    # Server-side half of the copy action: the actual clipboard write happens
    # client-side in COPY_ACTION_JS; this only shows a confirmation toast.
    gr.Info("Copied!")
37
+
38
+
39
# Client-side JS handler attached to the copy button: writes the (non-empty)
# input string to the browser clipboard.
COPY_ACTION_JS = """\
(inputs, _outputs) => {
  // inputs is the string value of the input_text
  if (inputs.trim() !== "") {
    navigator.clipboard.writeText(inputs);
  }
}"""
46
+
47
+
48
def gradio_copy_prompt(prompt: str):
    """Show a 'Copied!' toast and echo the prompt back (used as a Gradio callback)."""
    gr.Info("Copied!")
    return prompt
tagger/v2.py ADDED
@@ -0,0 +1,260 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import time
2
+ import torch
3
+ from typing import Callable
4
+ from pathlib import Path
5
+
6
+ from dartrs.v2 import (
7
+ V2Model,
8
+ MixtralModel,
9
+ MistralModel,
10
+ compose_prompt,
11
+ LengthTag,
12
+ AspectRatioTag,
13
+ RatingTag,
14
+ IdentityTag,
15
+ )
16
+ from dartrs.dartrs import DartTokenizer
17
+ from dartrs.utils import get_generation_config
18
+
19
+
20
+ import gradio as gr
21
+ from gradio.components import Component
22
+
23
+
24
+ try:
25
+ from output import UpsamplingOutput
26
+ except:
27
+ from .output import UpsamplingOutput
28
+
29
+
30
# Available Dart v2 upsampler checkpoints: HF repo id, training type, and the
# dartrs model class used to load each one.
V2_ALL_MODELS = {
    "dart-v2-moe-sft": {
        "repo": "p1atdev/dart-v2-moe-sft",
        "type": "sft",
        "class": MixtralModel,
    },
    "dart-v2-sft": {
        "repo": "p1atdev/dart-v2-sft",
        "type": "sft",
        "class": MistralModel,
    },
}
42
+
43
+
44
def prepare_models(model_config: dict):
    """Load the tokenizer and weights for one entry of V2_ALL_MODELS.

    Returns a dict with "tokenizer" and "model" keys.
    """
    repo_id = model_config["repo"]
    return {
        "tokenizer": DartTokenizer.from_pretrained(repo_id),
        "model": model_config["class"].from_pretrained(repo_id),
    }
53
+
54
+
55
def normalize_tags(tokenizer: DartTokenizer, tags: str):
    """Tokenize `tags` and drop unknown tokens, returning a comma-joined string."""
    known = (token for token in tokenizer.tokenize(tags) if token != "<|unk|>")
    return ", ".join(known)
58
+
59
+
60
@torch.no_grad()
def generate_tags(
    model: V2Model,
    tokenizer: DartTokenizer,
    prompt: str,
    ban_token_ids: list[int],
):
    """Upsample `prompt` with a Dart v2 model and return the generated tag string.

    `ban_token_ids` suppresses user-banned tags during sampling.
    """
    # Fixed sampling configuration used for all upsampling calls.
    config = get_generation_config(
        prompt,
        tokenizer=tokenizer,
        temperature=1,
        top_p=0.9,
        top_k=100,
        max_new_tokens=256,
        ban_token_ids=ban_token_ids,
    )
    return model.generate(config)
80
+
81
+
82
+ def _people_tag(noun: str, minimum: int = 1, maximum: int = 5):
83
+ return (
84
+ [f"1{noun}"]
85
+ + [f"{num}{noun}s" for num in range(minimum + 1, maximum + 1)]
86
+ + [f"{maximum+1}+{noun}s"]
87
+ )
88
+
89
+
90
# Subject-count tags (e.g. "1girl", "2boys", "no humans"); these are always
# moved to the front of generated prompts.
PEOPLE_TAGS = (
    _people_tag("girl") + _people_tag("boy") + _people_tag("other") + ["no humans"]
)
93
+
94
+
95
def gen_prompt_text(output: UpsamplingOutput):
    """Flatten an UpsamplingOutput into a single comma-separated prompt.

    People-count tags lead, then character/copyright tags, the remaining
    general tags, the upsampled tags and finally the rating tag. Empty parts
    are dropped.
    """
    # separate people tags (e.g. 1girl)
    people_tags = []
    other_general_tags = []
    for raw in output.general_tags.split(","):
        stripped = raw.strip()
        if stripped in PEOPLE_TAGS:
            people_tags.append(stripped)
        else:
            other_general_tags.append(stripped)

    ordered = [
        *people_tags,
        output.character_tags,
        output.copyright_tags,
        *other_general_tags,
        output.upsampled_tags,
        output.rating_tag,
    ]
    return ", ".join(part.strip() for part in ordered if part.strip() != "")
121
+
122
+
123
def elapsed_time_format(elapsed_time: float) -> str:
    """Render an elapsed-seconds value for display, e.g. 'Elapsed: 1.23 seconds'."""
    return "Elapsed: %.2f seconds" % elapsed_time
125
+
126
+
127
def parse_upsampling_output(
    upsampler: Callable[..., UpsamplingOutput],
):
    """Wrap `upsampler` so its result is returned as Gradio-ready outputs.

    The wrapper returns (prompt_text, elapsed_label, update, update); the two
    gr.update values re-enable downstream buttons.
    """
    def _wrapped(*args) -> tuple[str, str, dict]:
        result = upsampler(*args)
        return (
            gen_prompt_text(result),
            elapsed_time_format(result.elapsed_time),
            gr.update(interactive=True),
            gr.update(interactive=True),
        )

    return _wrapped
141
+
142
+
143
class V2UI:
    """Holds the lazily-loaded Dart v2 upsampler model and its UI state."""

    # Name of the currently loaded model; None until the first generation.
    model_name: str | None = None
    model: V2Model
    tokenizer: DartTokenizer

    # Gradio components wired up elsewhere in the app.
    input_components: list[Component] = []
    generate_btn: gr.Button

    def on_generate(
        self,
        model_name: str,
        copyright_tags: str,
        character_tags: str,
        general_tags: str,
        rating_tag: RatingTag,
        aspect_ratio_tag: AspectRatioTag,
        length_tag: LengthTag,
        identity_tag: IdentityTag,
        ban_tags: str,
        *args,
    ) -> UpsamplingOutput:
        """Compose a Dart v2 prompt from the tag fields and run generation.

        Loads (or re-uses) the requested model, encodes `ban_tags` into token
        ids to suppress during sampling, and returns an UpsamplingOutput that
        includes the wall-clock generation time.
        """
        # Reload only when a different model was selected.
        if self.model_name is None or self.model_name != model_name:
            models = prepare_models(V2_ALL_MODELS[model_name])
            self.model = models["model"]
            self.tokenizer = models["tokenizer"]
            self.model_name = model_name

        # normalize tags
        # copyright_tags = normalize_tags(self.tokenizer, copyright_tags)
        # character_tags = normalize_tags(self.tokenizer, character_tags)
        # general_tags = normalize_tags(self.tokenizer, general_tags)

        ban_token_ids = self.tokenizer.encode(ban_tags.strip())

        prompt = compose_prompt(
            prompt=general_tags,
            copyright=copyright_tags,
            character=character_tags,
            rating=rating_tag,
            aspect_ratio=aspect_ratio_tag,
            length=length_tag,
            identity=identity_tag,
        )

        start = time.time()
        upsampled_tags = generate_tags(
            self.model,
            self.tokenizer,
            prompt,
            ban_token_ids,
        )
        elapsed_time = time.time() - start

        return UpsamplingOutput(
            upsampled_tags=upsampled_tags,
            copyright_tags=copyright_tags,
            character_tags=character_tags,
            general_tags=general_tags,
            rating_tag=rating_tag,
            aspect_ratio_tag=aspect_ratio_tag,
            length_tag=length_tag,
            identity_tag=identity_tag,
            elapsed_time=elapsed_time,
        )
206
+ )
207
+
208
+
209
def parse_upsampling_output_simple(upsampler: UpsamplingOutput):
    """Return just the flattened prompt text for an UpsamplingOutput (no UI updates)."""
    return gen_prompt_text(upsampler)
211
+
212
+
213
# Shared upsampler instance; caches the loaded model across calls.
v2 = V2UI()
214
+
215
+
216
def v2_upsampling_prompt(model: str = "dart-v2-moe-sft", copyright: str = "", character: str = "",
                         general_tags: str = "", rating: str = "nsfw", aspect_ratio: str = "square",
                         length: str = "very_long", identity: str = "lax", ban_tags: str = "censored"):
    """Upsample tags with the shared Dart v2 model and return the raw prompt string."""
    output = v2.on_generate(model, copyright, character, general_tags,
                            rating, aspect_ratio, length, identity, ban_tags)
    return parse_upsampling_output_simple(output)
222
+
223
+
224
def load_dict_from_csv(filename):
    """Load a two-column CSV into a dict (first column -> second column).

    Falls back to ./tagger/<filename> when the file is not found directly.

    Args:
        filename: Path (or bare name) of the CSV file.

    Returns:
        dict mapping column 0 to column 1; empty when the file is missing or
        unreadable. Malformed lines (fewer than two fields) are skipped.
    """
    result = {}  # renamed from `dict`, which shadowed the builtin
    if not Path(filename).exists():
        alt = Path('./tagger/', filename)
        if alt.exists(): filename = str(alt)
        else: return result
    try:
        with open(filename, 'r', encoding="utf-8") as f:
            lines = f.readlines()
    except Exception:
        # Original f-string had no placeholder, so the failing path was lost.
        print(f"Failed to open dictionary file: {filename}")
        return result
    for line in lines:
        parts = line.strip().split(',')
        if len(parts) < 2: continue  # blank/malformed line: skip instead of IndexError
        result[parts[0]] = parts[1]
    return result
239
+
240
+
241
# character name -> series name mapping (loaded from CSV; empty if missing)
anime_series_dict = load_dict_from_csv('character_series_dict.csv')
242
+
243
+
244
def select_random_character(series: str, character: str):
    """Pick a random character from anime_series_dict and look up its series.

    The incoming `series`/`character` arguments are ignored; they exist so the
    function can be dropped into Gradio callbacks that pass current field values.

    Returns:
        (series, character) strings; ("", "") when the dictionary is empty.
    """
    from random import seed, randrange
    seed()
    character_list = list(anime_series_dict.keys())
    if not character_list:  # dictionary file missing or empty
        return "", ""
    # Use the full range: the original randrange(len - 1) could never select
    # the last character and raised ValueError when only one entry existed.
    character = character_list[randrange(len(character_list))]
    series = anime_series_dict.get(character.split(",")[0].strip(), "")
    return series, character
251
+
252
+
253
def v2_random_prompt(general_tags: str = "", copyright: str = "", character: str = "", rating: str = "nsfw",
                     aspect_ratio: str = "square", length: str = "very_long", identity: str = "lax",
                     ban_tags: str = "censored", model: str = "dart-v2-moe-sft"):
    """Upsample tags, picking a random character/series when none is given.

    Returns (prompt, copyright, character) so the UI can show what was chosen.
    """
    if copyright == "" and character == "":
        copyright, character = select_random_character("", "")
    prompt = v2_upsampling_prompt(model, copyright, character, general_tags, rating,
                                  aspect_ratio, length, identity, ban_tags)
    return prompt, copyright, character
textual_inversion_dict.json ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bad_prompt_version2.pt": [
3
+ "bad_prompt",
4
+ false
5
+ ],
6
+ "EasyNegativeV2.safetensors": [
7
+ "EasyNegative",
8
+ false
9
+ ],
10
+ "bad-hands-5.pt": [
11
+ "bad_hand",
12
+ false
13
+ ],
14
+ "negativeXL_A.safetensors": [
15
+ "negativeXL_A",
16
+ false
17
+ ],
18
+ "negativeXL_B.safetensors": [
19
+ "negativeXL_B",
20
+ false
21
+ ],
22
+ "negativeXL_C.safetensors": [
23
+ "negativeXL_C",
24
+ false
25
+ ],
26
+ "negativeXL_D.safetensors": [
27
+ "negativeXL_D",
28
+ false
29
+ ],
30
+ "unaestheticXL2v10.safetensors": [
31
+ "2v10",
32
+ false
33
+ ],
34
+ "unaestheticXL_AYv1.safetensors": [
35
+ "_AYv1",
36
+ false
37
+ ],
38
+ "unaestheticXL_Alb2.safetensors": [
39
+ "_Alb2",
40
+ false
41
+ ],
42
+ "unaestheticXL_Jug6.safetensors": [
43
+ "_Jug6",
44
+ false
45
+ ],
46
+ "unaestheticXL_bp5.safetensors": [
47
+ "_bp5",
48
+ false
49
+ ],
50
+ "unaestheticXL_hk1.safetensors": [
51
+ "_hk1",
52
+ false
53
+ ],
54
+ "unaestheticXLv1.safetensors": [
55
+ "v1.0",
56
+ false
57
+ ],
58
+ "unaestheticXLv13.safetensors": [
59
+ "v1.3",
60
+ false
61
+ ],
62
+ "unaestheticXLv31.safetensors": [
63
+ "v3.1",
64
+ false
65
+ ],
66
+ "unaestheticXL_Sky3.1.safetensors": [
67
+ "_Sky3.1",
68
+ false
69
+ ],
70
+ "SimplePositiveXLv2.safetensors": [
71
+ "SIMPLEPOSITIVEXLV2",
72
+ true
73
+ ]
74
+ }