John6666 committed on
Commit
eec4bb9
1 Parent(s): aab58d5

Upload 8 files

Files changed (8):
  1. app.py +54 -18
  2. constants.py +113 -88
  3. dc.py +202 -397
  4. image_processor.py +130 -0
  5. llmdolphin.py +255 -0
  6. modutils.py +68 -24
  7. requirements.txt +2 -1
  8. utils.py +76 -17
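
The core of this commit is a wider generation API: infer() and _infer() now take seven LoRA slot/weight pairs plus clip_skip, pag_scale, free_u, guidance_rescale, and a caller-set gpu_duration. The following is an editor's sketch, not code from the commit, of the positional argument order app.py wires into dc.infer after this change; every value is an illustrative placeholder, and "Automatic" merely stands in for the first entry of the schedule option lists, which this diff does not show.

# Editor's sketch (assumed placeholder values throughout).
args = [
    "1girl, solo, looking at viewer",            # prompt
    "(low quality, worst quality:1.2)",          # negative_prompt
    0, True,                                     # seed, randomize_seed
    1024, 1024,                                  # width, height
    7.0, 28,                                     # guidance_scale, num_inference_steps
    "stabilityai/stable-diffusion-xl-base-1.0",  # model_name
    *(["None", 1.0] * 7),                        # lora1..lora7 with their weights
    "Euler", "None", True,                       # sampler, vae_model, auto_trans
    "Automatic", "Automatic",                    # schedule_type, schedule_prediction_type (assumed defaults)
    True, 0.0, False, 0.0,                       # clip_skip, pag_scale, free_u, guidance_rescale
    59, True,                                    # gpu_duration (seconds), recom_prompt
]
assert len(args) == 34  # matches the inputs list wired up in app.py below
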
app.py CHANGED
@@ -8,7 +8,7 @@ from dc import (infer, _infer, pass_result, get_diffusers_model_list, get_sample
8
  preset_quality, preset_styles, process_style_prompt, get_all_lora_tupled_list, update_loras, apply_lora_prompt,
9
  download_my_lora, search_civitai_lora, update_civitai_selection, select_civitai_lora, search_civitai_lora_json,
10
  get_t2i_model_info, get_civitai_tag, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL,
11
- SCHEDULE_TYPE_OPTIONS, SCHEDULE_PREDICTION_TYPE_OPTIONS)
12
  # Translator
13
  from llmdolphin import (dolphin_respond_auto, dolphin_parse_simple,
14
  get_llm_formats, get_dolphin_model_format, get_dolphin_models,
@@ -34,7 +34,7 @@ def description_ui():
34
 
35
 
36
  MAX_SEED = np.iinfo(np.int32).max
37
- MAX_IMAGE_SIZE = 1216
38
 
39
  css = """
40
  #container { margin: 0 auto; !important; }
@@ -70,18 +70,23 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
70
 
71
  with gr.Accordion("Advanced Settings", open=False):
72
  with gr.Row():
73
- negative_prompt = gr.Text(label="Negative prompt", lines=1, max_lines=6, placeholder="Enter a negative prompt",
74
  value="(low quality, worst quality:1.2), very displeasing, watermark, signature, ugly")
75
 
76
  with gr.Row():
77
  seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
78
  randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
 
79
 
80
  with gr.Row():
81
  width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024) # 832
82
  height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024) # 1216
83
  guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=30.0, step=0.1, value=7)
 
84
  num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=100, step=1, value=28)
85
 
86
  with gr.Row():
87
  with gr.Column(scale=4):
@@ -99,11 +104,11 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
99
  vae_model = gr.Dropdown(label="VAE Model", choices=get_vaes(), value=get_vaes()[0])
100
 
101
  with gr.Accordion("LoRA", open=True, visible=True):
102
- def lora_dropdown(label):
103
- return gr.Dropdown(label=label, choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
104
 
105
- def lora_scale_slider(label):
106
- return gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label=label)
107
 
108
  def lora_textbox():
109
  return gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
@@ -149,6 +154,22 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
149
  lora5_info = lora_textbox()
150
  lora5_copy = gr.Button(value="Copy example to prompt", visible=False)
151
  lora5_md = gr.Markdown(value="", visible=False)
152
  with gr.Accordion("From URL", open=True, visible=True):
153
  with gr.Row():
154
  lora_search_civitai_basemodel = gr.CheckboxGroup(label="Search LoRA for", choices=CIVITAI_BASEMODEL, value=["Pony", "Illustrious", "SDXL 1.0"])
@@ -172,6 +193,9 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
172
  quality_selector = gr.Radio(label="Quality Tag Presets", interactive=True, choices=list(preset_quality.keys()), value="None", scale=3)
173
  style_selector = gr.Radio(label="Style Presets", interactive=True, choices=list(preset_styles.keys()), value="None", scale=3)
174
  recom_prompt = gr.Checkbox(label="Recommended prompt", value=True, scale=1)
175
 
176
  with gr.Accordion("Translation Settings", open=False):
177
  chatbot = gr.Chatbot(render_markdown=False, visible=False) # component for auto-translation
@@ -203,8 +227,10 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
203
  fn=infer,
204
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
205
  guidance_scale, num_inference_steps, model_name,
206
- lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
207
- sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type, recom_prompt],
208
  outputs=[result],
209
  queue=True,
210
  show_progress="full",
@@ -216,8 +242,10 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
216
  fn=_infer, # dummy fn for api
217
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
218
  guidance_scale, num_inference_steps, model_name,
219
- lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
220
- sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type, recom_prompt],
221
  outputs=[result],
222
  queue=False,
223
  show_api=True,
@@ -239,8 +267,10 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
239
  fn=infer,
240
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
241
  guidance_scale, num_inference_steps, model_name,
242
- lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
243
- sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type, recom_prompt],
244
  outputs=[result],
245
  queue=True,
246
  show_progress="full",
@@ -252,12 +282,13 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
252
 
253
  gr.on(
254
  triggers=[lora1.change, lora1_wt.change, lora2.change, lora2_wt.change, lora3.change, lora3_wt.change,
255
- lora4.change, lora4_wt.change, lora5.change, lora5_wt.change],
256
  fn=update_loras,
257
- inputs=[prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt],
258
  outputs=[prompt, lora1, lora1_wt, lora1_info, lora1_copy, lora1_md,
259
  lora2, lora2_wt, lora2_info, lora2_copy, lora2_md, lora3, lora3_wt, lora3_info, lora3_copy, lora3_md,
260
- lora4, lora4_wt, lora4_info, lora4_copy, lora4_md, lora5, lora5_wt, lora5_info, lora5_copy, lora5_md],
 
261
  queue=False,
262
  trigger_mode="once",
263
  show_api=False,
@@ -267,6 +298,8 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
267
  lora3_copy.click(apply_lora_prompt, [prompt, lora3_info], [prompt], queue=False, show_api=False)
268
  lora4_copy.click(apply_lora_prompt, [prompt, lora4_info], [prompt], queue=False, show_api=False)
269
  lora5_copy.click(apply_lora_prompt, [prompt, lora5_info], [prompt], queue=False, show_api=False)
270
 
271
  gr.on(
272
  triggers=[lora_search_civitai_submit.click, lora_search_civitai_query.submit],
@@ -282,8 +315,8 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
282
  gr.on(
283
  triggers=[lora_download.click, lora_download_url.submit],
284
  fn=download_my_lora,
285
- inputs=[lora_download_url,lora1, lora2, lora3, lora4, lora5],
286
- outputs=[lora1, lora2, lora3, lora4, lora5],
287
  scroll_to_output=True,
288
  queue=True,
289
  show_api=False,
@@ -434,6 +467,9 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
434
  outputs=[result_up_tab],
435
  )
436
437
  gr.LoginButton()
438
  gr.DuplicateButton(value="Duplicate Space for private use (This demo does not work on CPU. Requires GPU Space)")
439

app.py (after changes):
8
  preset_quality, preset_styles, process_style_prompt, get_all_lora_tupled_list, update_loras, apply_lora_prompt,
9
  download_my_lora, search_civitai_lora, update_civitai_selection, select_civitai_lora, search_civitai_lora_json,
10
  get_t2i_model_info, get_civitai_tag, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL,
11
+ SCHEDULE_TYPE_OPTIONS, SCHEDULE_PREDICTION_TYPE_OPTIONS, preprocessor_tab, SDXL_TASK, TASK_MODEL_LIST)
12
  # Translator
13
  from llmdolphin import (dolphin_respond_auto, dolphin_parse_simple,
14
  get_llm_formats, get_dolphin_model_format, get_dolphin_models,
 
34
 
35
 
36
  MAX_SEED = np.iinfo(np.int32).max
37
+ MAX_IMAGE_SIZE = 4096
38
 
39
  css = """
40
  #container { margin: 0 auto; !important; }
 
70
 
71
  with gr.Accordion("Advanced Settings", open=False):
72
  with gr.Row():
73
+ negative_prompt = gr.Text(label="Negative prompt", lines=1, max_lines=6, placeholder="Enter a negative prompt", show_copy_button=True,
74
  value="(low quality, worst quality:1.2), very displeasing, watermark, signature, ugly")
75
 
76
  with gr.Row():
77
  seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
78
  randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
79
+ gpu_duration = gr.Number(minimum=5, maximum=240, value=59, show_label=False, container=False, info="GPU time duration (seconds)")
80
 
81
  with gr.Row():
82
  width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024) # 832
83
  height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024) # 1216
84
  guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=30.0, step=0.1, value=7)
85
+ guidance_rescale = gr.Slider(label="CFG rescale:", value=0., step=0.01, minimum=0., maximum=1.5)
86
  num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=100, step=1, value=28)
87
+ pag_scale = gr.Slider(minimum=0.0, maximum=10.0, step=0.1, value=0.0, label="PAG Scale")
88
+ clip_skip = gr.Checkbox(value=True, label="Layer 2 Clip Skip")
89
+ free_u = gr.Checkbox(value=False, label="FreeU")
90
 
91
  with gr.Row():
92
  with gr.Column(scale=4):
 
104
  vae_model = gr.Dropdown(label="VAE Model", choices=get_vaes(), value=get_vaes()[0])
105
 
106
  with gr.Accordion("LoRA", open=True, visible=True):
107
+ def lora_dropdown(label, visible=True):
108
+ return gr.Dropdown(label=label, choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320, visible=visible)
109
 
110
+ def lora_scale_slider(label, visible=True):
111
+ return gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label=label, visible=visible)
112
 
113
  def lora_textbox():
114
  return gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
 
154
  lora5_info = lora_textbox()
155
  lora5_copy = gr.Button(value="Copy example to prompt", visible=False)
156
  lora5_md = gr.Markdown(value="", visible=False)
157
+ with gr.Column():
158
+ with gr.Row():
159
+ lora6 = lora_dropdown("LoRA 6", visible=False)
160
+ lora6_wt = lora_scale_slider("LoRA 6: weight", visible=False)
161
+ with gr.Row():
162
+ lora6_info = lora_textbox()
163
+ lora6_copy = gr.Button(value="Copy example to prompt", visible=False)
164
+ lora6_md = gr.Markdown(value="", visible=False)
165
+ with gr.Column():
166
+ with gr.Row():
167
+ lora7 = lora_dropdown("LoRA 7", visible=False)
168
+ lora7_wt = lora_scale_slider("LoRA 7: weight", visible=False)
169
+ with gr.Row():
170
+ lora7_info = lora_textbox()
171
+ lora7_copy = gr.Button(value="Copy example to prompt", visible=False)
172
+ lora7_md = gr.Markdown(value="", visible=False)
173
  with gr.Accordion("From URL", open=True, visible=True):
174
  with gr.Row():
175
  lora_search_civitai_basemodel = gr.CheckboxGroup(label="Search LoRA for", choices=CIVITAI_BASEMODEL, value=["Pony", "Illustrious", "SDXL 1.0"])
 
193
  quality_selector = gr.Radio(label="Quality Tag Presets", interactive=True, choices=list(preset_quality.keys()), value="None", scale=3)
194
  style_selector = gr.Radio(label="Style Presets", interactive=True, choices=list(preset_styles.keys()), value="None", scale=3)
195
  recom_prompt = gr.Checkbox(label="Recommended prompt", value=True, scale=1)
196
+
197
+ with gr.Accordion("Other", open=True, visible=True):
198
+ task = gr.Dropdown(label="Task", choices=SDXL_TASK, value=TASK_MODEL_LIST[0])
199
 
200
  with gr.Accordion("Translation Settings", open=False):
201
  chatbot = gr.Chatbot(render_markdown=False, visible=False) # component for auto-translation
 
227
  fn=infer,
228
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
229
  guidance_scale, num_inference_steps, model_name,
230
+ lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt,
231
+ lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt,
232
+ sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type,
233
+ clip_skip, pag_scale, free_u, guidance_rescale, gpu_duration, recom_prompt],
234
  outputs=[result],
235
  queue=True,
236
  show_progress="full",
 
242
  fn=_infer, # dummy fn for api
243
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
244
  guidance_scale, num_inference_steps, model_name,
245
+ lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt,
246
+ lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt,
247
+ sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type,
248
+ clip_skip, pag_scale, free_u, guidance_rescale, gpu_duration, recom_prompt],
249
  outputs=[result],
250
  queue=False,
251
  show_api=True,
 
267
  fn=infer,
268
  inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
269
  guidance_scale, num_inference_steps, model_name,
270
+ lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt,
271
+ lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt,
272
+ sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type,
273
+ clip_skip, pag_scale, free_u, guidance_rescale, gpu_duration, recom_prompt],
274
  outputs=[result],
275
  queue=True,
276
  show_progress="full",
 
282
 
283
  gr.on(
284
  triggers=[lora1.change, lora1_wt.change, lora2.change, lora2_wt.change, lora3.change, lora3_wt.change,
285
+ lora4.change, lora4_wt.change, lora5.change, lora5_wt.change, lora6.change, lora6_wt.change, lora7.change, lora7_wt.change],
286
  fn=update_loras,
287
+ inputs=[prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt],
288
  outputs=[prompt, lora1, lora1_wt, lora1_info, lora1_copy, lora1_md,
289
  lora2, lora2_wt, lora2_info, lora2_copy, lora2_md, lora3, lora3_wt, lora3_info, lora3_copy, lora3_md,
290
+ lora4, lora4_wt, lora4_info, lora4_copy, lora4_md, lora5, lora5_wt, lora5_info, lora5_copy, lora5_md,
291
+ lora6, lora6_wt, lora6_info, lora6_copy, lora6_md, lora7, lora7_wt, lora7_info, lora7_copy, lora7_md],
292
  queue=False,
293
  trigger_mode="once",
294
  show_api=False,
 
298
  lora3_copy.click(apply_lora_prompt, [prompt, lora3_info], [prompt], queue=False, show_api=False)
299
  lora4_copy.click(apply_lora_prompt, [prompt, lora4_info], [prompt], queue=False, show_api=False)
300
  lora5_copy.click(apply_lora_prompt, [prompt, lora5_info], [prompt], queue=False, show_api=False)
301
+ lora6_copy.click(apply_lora_prompt, [prompt, lora6_info], [prompt], queue=False, show_api=False)
302
+ lora7_copy.click(apply_lora_prompt, [prompt, lora7_info], [prompt], queue=False, show_api=False)
303
 
304
  gr.on(
305
  triggers=[lora_search_civitai_submit.click, lora_search_civitai_query.submit],
 
315
  gr.on(
316
  triggers=[lora_download.click, lora_download_url.submit],
317
  fn=download_my_lora,
318
+ inputs=[lora_download_url, lora1, lora2, lora3, lora4, lora5, lora6, lora7],
319
+ outputs=[lora1, lora2, lora3, lora4, lora5, lora6, lora7],
320
  scroll_to_output=True,
321
  queue=True,
322
  show_api=False,
 
467
  outputs=[result_up_tab],
468
  )
469
 
470
+ with gr.Tab("Preprocessor", render=True):
471
+ preprocessor_tab()
472
+
473
  gr.LoginButton()
474
  gr.DuplicateButton(value="Duplicate Space for private use (This demo does not work on CPU. Requires GPU Space)")
475
 
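The LoRA 6/7 rows above are built from the new lora_dropdown(label, visible=True) and lora_scale_slider(label, visible=True) factories: the components are created hidden and revealed later through gr.update. A self-contained sketch of that pattern follows (editor's illustration; the toggle checkbox and the placeholder choices are not components from the app).

import gradio as gr

def lora_dropdown(label, visible=True):
    # Factory mirroring app.py: a dropdown that can start hidden.
    return gr.Dropdown(label=label, choices=["None"], value="None",
                       allow_custom_value=True, visible=visible)

def lora_scale_slider(label, visible=True):
    return gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.0,
                     label=label, visible=visible)

with gr.Blocks() as demo:
    show_extra = gr.Checkbox(label="Show extra LoRA slots", value=False)
    lora6 = lora_dropdown("LoRA 6", visible=False)
    lora6_wt = lora_scale_slider("LoRA 6: weight", visible=False)
    # One handler flips both hidden components at once.
    show_extra.change(lambda v: (gr.update(visible=v), gr.update(visible=v)),
                      [show_extra], [lora6, lora6_wt])

if __name__ == "__main__":
    demo.launch()
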
constants.py CHANGED
@@ -7,30 +7,49 @@ from stablepy import (
7
  )
8
 
9
  # - **Download Models**
10
- DOWNLOAD_MODEL = "https://civitai.com/api/download/models/574369, https://huggingface.co/TechnoByte/MilkyWonderland/resolve/main/milkyWonderland_v40.safetensors"
11
 
12
  # - **Download VAEs**
13
- DOWNLOAD_VAE = "https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/resolve/main/sdxl_vae-fp16fix-c-1.1-b-0.5.safetensors?download=true, https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/resolve/main/sdxl_vae-fp16fix-blessed.safetensors?download=true, https://huggingface.co/digiplay/VAE/resolve/main/vividReal_v20.safetensors?download=true, https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/vae-ft-mse-840000-ema-pruned_fp16.safetensors?download=true"
14
 
15
  # - **Download LoRAs**
16
  DOWNLOAD_LORA = "https://huggingface.co/Leopain/color/resolve/main/Coloring_book_-_LineArt.safetensors, https://civitai.com/api/download/models/135867, https://huggingface.co/Linaqruf/anime-detailer-xl-lora/resolve/main/anime-detailer-xl.safetensors?download=true, https://huggingface.co/Linaqruf/style-enhancer-xl-lora/resolve/main/style-enhancer-xl.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SD15-8steps-CFG-lora.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SDXL-8steps-CFG-lora.safetensors?download=true"
17
 
18
  LOAD_DIFFUSERS_FORMAT_MODEL = [
19
  'stabilityai/stable-diffusion-xl-base-1.0',
 
20
  'black-forest-labs/FLUX.1-dev',
21
  'John6666/blue-pencil-flux1-v021-fp8-flux',
22
  'John6666/wai-ani-flux-v10forfp8-fp8-flux',
23
  'John6666/xe-anime-flux-v04-fp8-flux',
24
  'John6666/lyh-anime-flux-v2a1-fp8-flux',
25
  'John6666/carnival-unchained-v10-fp8-flux',
 
26
  'Freepik/flux.1-lite-8B-alpha',
27
  'ostris/OpenFLUX.1',
28
  'John6666/noobai-xl-nai-xl-epsilonpred10version-sdxl',
 
29
  'John6666/noobai-xl-nai-xl-epsilonpred075version-sdxl',
 
30
  'John6666/noobai-xl-nai-xl-epsilonpred05version-sdxl',
31
  'John6666/noobai-cyberfix-v10-sdxl',
32
  'John6666/noobaiiter-xl-vpred-v075-sdxl',
33
  'John6666/complicated-noobai-merge-vprediction-sdxl',
34
  'John6666/noobai-fusion2-vpred-itercomp-v1-sdxl',
35
  'John6666/noobai-xl-nai-xl-vpredtestversion-sdxl',
36
  'John6666/chadmix-noobai075-illustrious01-v10-sdxl',
@@ -38,9 +57,17 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
38
  'John6666/illustriousxl-mmmix-v50-sdxl',
39
  'John6666/illustrious-pencil-xl-v200-sdxl',
40
  'John6666/obsession-illustriousxl-v21-sdxl',
41
  'eienmojiki/Anything-XL',
42
  'eienmojiki/Starry-XL-v5.2',
43
  'John6666/meinaxl-v2-sdxl',
44
  'John6666/epicrealism-xl-v10kiss2-sdxl',
45
  'John6666/epicrealism-xl-v8kiss-sdxl',
46
  'misri/zavychromaxl_v80',
@@ -48,21 +75,21 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
48
  'SG161222/RealVisXL_V5.0',
49
  'misri/newrealityxlAllInOne_Newreality40',
50
  'gsdf/CounterfeitXL',
51
- 'John6666/silvermoon-mix-01xl-v11-sdxl',
52
  'WhiteAiZ/autismmixSDXL_autismmixConfetti_diffusers',
53
  'kitty7779/ponyDiffusionV6XL',
54
  'GraydientPlatformAPI/aniverse-pony',
55
  'John6666/ras-real-anime-screencap-v1-sdxl',
56
  'John6666/duchaiten-pony-xl-no-score-v60-sdxl',
57
  'John6666/mistoon-anime-ponyalpha-sdxl',
58
- 'John6666/3x3x3mixxl-v2-sdxl',
59
- 'John6666/3x3x3mixxl-3dv01-sdxl',
60
  'John6666/ebara-mfcg-pony-mix-v12-sdxl',
61
  'John6666/t-ponynai3-v51-sdxl',
62
  'John6666/t-ponynai3-v65-sdxl',
63
  'John6666/prefect-pony-xl-v3-sdxl',
 
64
  'John6666/mala-anime-mix-nsfw-pony-xl-v5-sdxl',
 
65
  'John6666/wai-real-mix-v11-sdxl',
 
66
  'John6666/wai-c-v6-sdxl',
67
  'John6666/iniverse-mix-xl-sfwnsfw-pony-guofeng-v43-sdxl',
68
  'John6666/sifw-annihilation-xl-v2-sdxl',
@@ -79,8 +106,6 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
79
  'John6666/duchaiten-pony-real-v11fix-sdxl',
80
  'John6666/duchaiten-pony-real-v20-sdxl',
81
  'John6666/duchaiten-pony-xl-no-score-v70-sdxl',
82
- 'Eugeoter/artiwaifu-diffusion-2.0',
83
- 'comin/IterComp',
84
  'KBlueLeaf/Kohaku-XL-Zeta',
85
  'cagliostrolab/animagine-xl-3.1',
86
  'yodayo-ai/kivotos-xl-2.0',
@@ -94,7 +119,7 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
94
  'digiplay/DarkSushi2.5D_v1',
95
  'digiplay/darkphoenix3D_v1.1',
96
  'digiplay/BeenYouLiteL11_diffusers',
97
- 'Yntec/RevAnimatedV2Rebirth',
98
  'youknownothing/cyberrealistic_v50',
99
  'youknownothing/deliberate-v6',
100
  'GraydientPlatformAPI/deliberate-cyber3',
@@ -120,9 +145,9 @@ DIFFUSERS_FORMAT_LORAS = [
120
 
121
  DOWNLOAD_EMBEDS = [
122
  'https://huggingface.co/datasets/Nerfgun3/bad_prompt/blob/main/bad_prompt_version2.pt',
123
- 'https://huggingface.co/embed/negative/resolve/main/EasyNegativeV2.safetensors',
124
- 'https://huggingface.co/embed/negative/resolve/main/bad-hands-5.pt',
125
- ]
126
 
127
  CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
128
  HF_TOKEN = os.environ.get("HF_READ_TOKEN")
@@ -132,78 +157,8 @@ DIRECTORY_LORAS = 'loras'
132
  DIRECTORY_VAES = 'vaes'
133
  DIRECTORY_EMBEDS = 'embedings'
134
 
135
- PREPROCESSOR_CONTROLNET = {
136
- "openpose": [
137
- "Openpose",
138
- "None",
139
- ],
140
- "scribble": [
141
- "HED",
142
- "PidiNet",
143
- "None",
144
- ],
145
- "softedge": [
146
- "PidiNet",
147
- "HED",
148
- "HED safe",
149
- "PidiNet safe",
150
- "None",
151
- ],
152
- "segmentation": [
153
- "UPerNet",
154
- "None",
155
- ],
156
- "depth": [
157
- "DPT",
158
- "Midas",
159
- "None",
160
- ],
161
- "normalbae": [
162
- "NormalBae",
163
- "None",
164
- ],
165
- "lineart": [
166
- "Lineart",
167
- "Lineart coarse",
168
- "Lineart (anime)",
169
- "None",
170
- "None (anime)",
171
- ],
172
- "lineart_anime": [
173
- "Lineart",
174
- "Lineart coarse",
175
- "Lineart (anime)",
176
- "None",
177
- "None (anime)",
178
- ],
179
- "shuffle": [
180
- "ContentShuffle",
181
- "None",
182
- ],
183
- "canny": [
184
- "Canny",
185
- "None",
186
- ],
187
- "mlsd": [
188
- "MLSD",
189
- "None",
190
- ],
191
- "ip2p": [
192
- "ip2p"
193
- ],
194
- "recolor": [
195
- "Recolor luminance",
196
- "Recolor intensity",
197
- "None",
198
- ],
199
- "tile": [
200
- "Mild Blur",
201
- "Moderate Blur",
202
- "Heavy Blur",
203
- "None",
204
- ],
205
-
206
- }
207
 
208
  TASK_STABLEPY = {
209
  'txt2img': 'txt2img',
@@ -261,11 +216,74 @@ UPSCALER_DICT_GUI = {
261
 
262
  UPSCALER_KEYS = list(UPSCALER_DICT_GUI.keys())
263
 
 
 
264
  PROMPT_W_OPTIONS = [
265
  ("Compel format: (word)weight", "Compel"),
266
  ("Classic format: (word:weight)", "Classic"),
267
  ("Classic-original format: (word:weight)", "Classic-original"),
268
  ("Classic-no_norm format: (word:weight)", "Classic-no_norm"),
 
269
  ("Classic-ignore", "Classic-ignore"),
270
  ("None", "None"),
271
  ]
@@ -291,6 +309,13 @@ MODEL_TYPE_CLASS = {
291
  "diffusers:FluxPipeline": "FLUX",
292
  }
293
 
294
  POST_PROCESSING_SAMPLER = ["Use same sampler"] + [
295
  name_s for name_s in scheduler_names if "Auto-Loader" not in name_s
296
  ]
@@ -341,7 +366,7 @@ EXAMPLES_GUI = [
341
  1.0, # cn scale
342
  0.0, # cn start
343
  1.0, # cn end
344
- "Classic",
345
  "Nearest",
346
  45,
347
  False,
@@ -354,7 +379,7 @@ EXAMPLES_GUI = [
354
  -1,
355
  "None",
356
  0.33,
357
- "FlowMatchEuler",
358
  1152,
359
  896,
360
  "black-forest-labs/FLUX.1-dev",
@@ -378,7 +403,7 @@ EXAMPLES_GUI = [
378
  -1,
379
  "None",
380
  0.33,
381
- "DPM++ 2M SDE Lu",
382
  1024,
383
  1024,
384
  "John6666/epicrealism-xl-v10kiss2-sdxl",
@@ -412,7 +437,7 @@ EXAMPLES_GUI = [
412
  0.35, # strength
413
  1.0, # cn scale
414
  0.05, # cn start
415
- 0.75, # cn end
416
  "Classic",
417
  None,
418
  35,
@@ -461,7 +486,7 @@ EXAMPLES_GUI = [
461
  1.0, # cn scale
462
  0.0, # cn start
463
  0.9, # cn end
464
- "Compel",
465
  "Latent (antialiased)",
466
  46,
467
  False,

constants.py (after changes):
7
  )
8
 
9
  # - **Download Models**
10
+ DOWNLOAD_MODEL = "https://huggingface.co/TechnoByte/MilkyWonderland/resolve/main/milkyWonderland_v40.safetensors"
11
 
12
  # - **Download VAEs**
13
+ DOWNLOAD_VAE = "https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/vae-ft-mse-840000-ema-pruned_fp16.safetensors?download=true"
14
 
15
  # - **Download LoRAs**
16
  DOWNLOAD_LORA = "https://huggingface.co/Leopain/color/resolve/main/Coloring_book_-_LineArt.safetensors, https://civitai.com/api/download/models/135867, https://huggingface.co/Linaqruf/anime-detailer-xl-lora/resolve/main/anime-detailer-xl.safetensors?download=true, https://huggingface.co/Linaqruf/style-enhancer-xl-lora/resolve/main/style-enhancer-xl.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SD15-8steps-CFG-lora.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SDXL-8steps-CFG-lora.safetensors?download=true"
17
 
18
  LOAD_DIFFUSERS_FORMAT_MODEL = [
19
  'stabilityai/stable-diffusion-xl-base-1.0',
20
+ 'Laxhar/noobai-XL-1.1',
21
  'black-forest-labs/FLUX.1-dev',
22
  'John6666/blue-pencil-flux1-v021-fp8-flux',
23
  'John6666/wai-ani-flux-v10forfp8-fp8-flux',
24
  'John6666/xe-anime-flux-v04-fp8-flux',
25
  'John6666/lyh-anime-flux-v2a1-fp8-flux',
26
  'John6666/carnival-unchained-v10-fp8-flux',
27
+ 'John6666/iniverse-mix-xl-sfwnsfw-fluxdfp16nsfwv11-fp8-flux',
28
  'Freepik/flux.1-lite-8B-alpha',
29
+ 'shauray/FluxDev-HyperSD-merged',
30
+ 'mikeyandfriends/PixelWave_FLUX.1-dev_03',
31
+ 'terminusresearch/FluxBooru-v0.3',
32
  'ostris/OpenFLUX.1',
33
+ 'shuttleai/shuttle-3-diffusion',
34
+ 'Laxhar/noobai-XL-1.0',
35
  'John6666/noobai-xl-nai-xl-epsilonpred10version-sdxl',
36
+ 'Laxhar/noobai-XL-0.77',
37
  'John6666/noobai-xl-nai-xl-epsilonpred075version-sdxl',
38
+ 'Laxhar/noobai-XL-0.6',
39
  'John6666/noobai-xl-nai-xl-epsilonpred05version-sdxl',
40
  'John6666/noobai-cyberfix-v10-sdxl',
41
  'John6666/noobaiiter-xl-vpred-v075-sdxl',
42
+ 'John6666/ntr-mix-illustrious-xl-noob-xl-v40-sdxl',
43
+ 'John6666/ntr-mix-illustrious-xl-noob-xl-ntrmix35-sdxl',
44
+ 'John6666/ntr-mix-illustrious-xl-noob-xl-v777-sdxl',
45
+ 'John6666/ntr-mix-illustrious-xl-noob-xl-v777forlora-sdxl',
46
+ 'John6666/haruki-mix-illustrious-v10-sdxl',
47
+ 'John6666/noobreal-v10-sdxl',
48
  'John6666/complicated-noobai-merge-vprediction-sdxl',
49
+ 'Laxhar/noobai-XL-Vpred-0.65s',
50
+ 'Laxhar/noobai-XL-Vpred-0.65',
51
+ 'Laxhar/noobai-XL-Vpred-0.6',
52
+ 'John6666/noobai-xl-nai-xl-vpred05version-sdxl',
53
  'John6666/noobai-fusion2-vpred-itercomp-v1-sdxl',
54
  'John6666/noobai-xl-nai-xl-vpredtestversion-sdxl',
55
  'John6666/chadmix-noobai075-illustrious01-v10-sdxl',
 
57
  'John6666/illustriousxl-mmmix-v50-sdxl',
58
  'John6666/illustrious-pencil-xl-v200-sdxl',
59
  'John6666/obsession-illustriousxl-v21-sdxl',
60
+ 'John6666/obsession-illustriousxl-v30-sdxl',
61
+ 'John6666/wai-nsfw-illustrious-v70-sdxl',
62
+ 'John6666/illustrious-pony-mix-v3-sdxl',
63
+ 'John6666/nova-anime-xl-illustriousv10-sdxl',
64
+ 'John6666/nova-orange-xl-v30-sdxl',
65
+ 'John6666/silvermoon-mix03-illustrious-v10-sdxl',
66
  'eienmojiki/Anything-XL',
67
  'eienmojiki/Starry-XL-v5.2',
68
  'John6666/meinaxl-v2-sdxl',
69
+ 'Eugeoter/artiwaifu-diffusion-2.0',
70
+ 'comin/IterComp',
71
  'John6666/epicrealism-xl-v10kiss2-sdxl',
72
  'John6666/epicrealism-xl-v8kiss-sdxl',
73
  'misri/zavychromaxl_v80',
 
75
  'SG161222/RealVisXL_V5.0',
76
  'misri/newrealityxlAllInOne_Newreality40',
77
  'gsdf/CounterfeitXL',
 
78
  'WhiteAiZ/autismmixSDXL_autismmixConfetti_diffusers',
79
  'kitty7779/ponyDiffusionV6XL',
80
  'GraydientPlatformAPI/aniverse-pony',
81
  'John6666/ras-real-anime-screencap-v1-sdxl',
82
  'John6666/duchaiten-pony-xl-no-score-v60-sdxl',
83
  'John6666/mistoon-anime-ponyalpha-sdxl',
84
  'John6666/ebara-mfcg-pony-mix-v12-sdxl',
85
  'John6666/t-ponynai3-v51-sdxl',
86
  'John6666/t-ponynai3-v65-sdxl',
87
  'John6666/prefect-pony-xl-v3-sdxl',
88
+ 'John6666/prefect-pony-xl-v4-sdxl',
89
  'John6666/mala-anime-mix-nsfw-pony-xl-v5-sdxl',
90
+ 'John6666/wai-ani-nsfw-ponyxl-v10-sdxl',
91
  'John6666/wai-real-mix-v11-sdxl',
92
+ 'John6666/wai-shuffle-pdxl-v2-sdxl',
93
  'John6666/wai-c-v6-sdxl',
94
  'John6666/iniverse-mix-xl-sfwnsfw-pony-guofeng-v43-sdxl',
95
  'John6666/sifw-annihilation-xl-v2-sdxl',
 
106
  'John6666/duchaiten-pony-real-v11fix-sdxl',
107
  'John6666/duchaiten-pony-real-v20-sdxl',
108
  'John6666/duchaiten-pony-xl-no-score-v70-sdxl',
109
  'KBlueLeaf/Kohaku-XL-Zeta',
110
  'cagliostrolab/animagine-xl-3.1',
111
  'yodayo-ai/kivotos-xl-2.0',
 
119
  'digiplay/DarkSushi2.5D_v1',
120
  'digiplay/darkphoenix3D_v1.1',
121
  'digiplay/BeenYouLiteL11_diffusers',
122
+ 'GraydientPlatformAPI/rev-animated2',
123
  'youknownothing/cyberrealistic_v50',
124
  'youknownothing/deliberate-v6',
125
  'GraydientPlatformAPI/deliberate-cyber3',
 
145
 
146
  DOWNLOAD_EMBEDS = [
147
  'https://huggingface.co/datasets/Nerfgun3/bad_prompt/blob/main/bad_prompt_version2.pt',
148
+ # 'https://huggingface.co/embed/negative/resolve/main/EasyNegativeV2.safetensors',
149
+ # 'https://huggingface.co/embed/negative/resolve/main/bad-hands-5.pt',
150
+ ]
151
 
152
  CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
153
  HF_TOKEN = os.environ.get("HF_READ_TOKEN")
 
157
  DIRECTORY_VAES = 'vaes'
158
  DIRECTORY_EMBEDS = 'embedings'
159
 
160
+ CACHE_HF = "/home/user/.cache/huggingface/hub/"
161
+ STORAGE_ROOT = "/home/user/"
 
 
162
 
163
  TASK_STABLEPY = {
164
  'txt2img': 'txt2img',
 
216
 
217
  UPSCALER_KEYS = list(UPSCALER_DICT_GUI.keys())
218
 
219
+ DIFFUSERS_CONTROLNET_MODEL = [
220
+ "Automatic",
221
+
222
+ "xinsir/controlnet-union-sdxl-1.0",
223
+ "xinsir/anime-painter",
224
+ "Eugeoter/noob-sdxl-controlnet-canny",
225
+ "Eugeoter/noob-sdxl-controlnet-lineart_anime",
226
+ "Eugeoter/noob-sdxl-controlnet-depth",
227
+ "Eugeoter/noob-sdxl-controlnet-normal",
228
+ "Eugeoter/noob-sdxl-controlnet-softedge_hed",
229
+ "Eugeoter/noob-sdxl-controlnet-scribble_pidinet",
230
+ "Eugeoter/noob-sdxl-controlnet-scribble_hed",
231
+ "Eugeoter/noob-sdxl-controlnet-manga_line",
232
+ "Eugeoter/noob-sdxl-controlnet-lineart_realistic",
233
+ "Eugeoter/noob-sdxl-controlnet-depth_midas-v1-1",
234
+ "dimitribarbot/controlnet-openpose-sdxl-1.0-safetensors",
235
+ "r3gm/controlnet-openpose-sdxl-1.0-fp16",
236
+ "r3gm/controlnet-canny-scribble-integrated-sdxl-v2-fp16",
237
+ "r3gm/controlnet-union-sdxl-1.0-fp16",
238
+ "r3gm/controlnet-lineart-anime-sdxl-fp16",
239
+ "r3gm/control_v1p_sdxl_qrcode_monster_fp16",
240
+ "r3gm/controlnet-tile-sdxl-1.0-fp16",
241
+ "r3gm/controlnet-recolor-sdxl-fp16",
242
+ "r3gm/controlnet-openpose-twins-sdxl-1.0-fp16",
243
+ "r3gm/controlnet-qr-pattern-sdxl-fp16",
244
+ "brad-twinkl/controlnet-union-sdxl-1.0-promax",
245
+ "Yakonrus/SDXL_Controlnet_Tile_Realistic_v2",
246
+ "TheMistoAI/MistoLine",
247
+ "briaai/BRIA-2.3-ControlNet-Recoloring",
248
+ "briaai/BRIA-2.3-ControlNet-Canny",
249
+
250
+ "lllyasviel/control_v11p_sd15_openpose",
251
+ "lllyasviel/control_v11p_sd15_canny",
252
+ "lllyasviel/control_v11p_sd15_mlsd",
253
+ "lllyasviel/control_v11p_sd15_scribble",
254
+ "lllyasviel/control_v11p_sd15_softedge",
255
+ "lllyasviel/control_v11p_sd15_seg",
256
+ "lllyasviel/control_v11f1p_sd15_depth",
257
+ "lllyasviel/control_v11p_sd15_normalbae",
258
+ "lllyasviel/control_v11p_sd15_lineart",
259
+ "lllyasviel/control_v11p_sd15s2_lineart_anime",
260
+ "lllyasviel/control_v11e_sd15_shuffle",
261
+ "lllyasviel/control_v11e_sd15_ip2p",
262
+ "lllyasviel/control_v11p_sd15_inpaint",
263
+ "monster-labs/control_v1p_sd15_qrcode_monster",
264
+ "lllyasviel/control_v11f1e_sd15_tile",
265
+ "latentcat/control_v1p_sd15_brightness",
266
+ "yuanqiuye/qrcode_controlnet_v3",
267
+
268
+ "Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro",
269
+ # "Shakker-Labs/FLUX.1-dev-ControlNet-Pose",
270
+ # "Shakker-Labs/FLUX.1-dev-ControlNet-Depth",
271
+ # "jasperai/Flux.1-dev-Controlnet-Upscaler",
272
+ # "jasperai/Flux.1-dev-Controlnet-Depth",
273
+ # "jasperai/Flux.1-dev-Controlnet-Surface-Normals",
274
+ # "XLabs-AI/flux-controlnet-canny-diffusers",
275
+ # "XLabs-AI/flux-controlnet-hed-diffusers",
276
+ # "XLabs-AI/flux-controlnet-depth-diffusers",
277
+ # "InstantX/FLUX.1-dev-Controlnet-Union",
278
+ # "InstantX/FLUX.1-dev-Controlnet-Canny",
279
+ ]
280
+
281
  PROMPT_W_OPTIONS = [
282
  ("Compel format: (word)weight", "Compel"),
283
  ("Classic format: (word:weight)", "Classic"),
284
  ("Classic-original format: (word:weight)", "Classic-original"),
285
  ("Classic-no_norm format: (word:weight)", "Classic-no_norm"),
286
+ ("Classic-sd_embed format: (word:weight)", "Classic-sd_embed"),
287
  ("Classic-ignore", "Classic-ignore"),
288
  ("None", "None"),
289
  ]
 
309
  "diffusers:FluxPipeline": "FLUX",
310
  }
311
 
312
+ DIFFUSECRAFT_CHECKPOINT_NAME = {
313
+ "sd1.5": "SD 1.5",
314
+ "sdxl": "SDXL",
315
+ "flux-dev": "FLUX",
316
+ "flux-schnell": "FLUX",
317
+ }
318
+
319
  POST_PROCESSING_SAMPLER = ["Use same sampler"] + [
320
  name_s for name_s in scheduler_names if "Auto-Loader" not in name_s
321
  ]
 
366
  1.0, # cn scale
367
  0.0, # cn start
368
  1.0, # cn end
369
+ "Classic-no_norm",
370
  "Nearest",
371
  45,
372
  False,
 
379
  -1,
380
  "None",
381
  0.33,
382
+ "FlowMatch Euler",
383
  1152,
384
  896,
385
  "black-forest-labs/FLUX.1-dev",
 
403
  -1,
404
  "None",
405
  0.33,
406
+ "DPM++ 2M SDE Ef",
407
  1024,
408
  1024,
409
  "John6666/epicrealism-xl-v10kiss2-sdxl",
 
437
  0.35, # strength
438
  1.0, # cn scale
439
  0.05, # cn start
440
+ 0.8, # cn end
441
  "Classic",
442
  None,
443
  35,
 
486
  1.0, # cn scale
487
  0.0, # cn start
488
  0.9, # cn end
489
+ "Classic-original",
490
  "Latent (antialiased)",
491
  46,
492
  False,
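
constants.py also gains the DIFFUSECRAFT_CHECKPOINT_NAME table, mapping checkpoint keys to the UI's model-type labels. A minimal sketch of how such a table could be consumed (editor's illustration; model_type_label and its fallback behavior are assumptions, not functions from the repo):

DIFFUSECRAFT_CHECKPOINT_NAME = {
    "sd1.5": "SD 1.5",
    "sdxl": "SDXL",
    "flux-dev": "FLUX",
    "flux-schnell": "FLUX",
}

def model_type_label(checkpoint_key: str) -> str:
    # Hypothetical helper: unknown keys fall back to the raw key so the UI
    # still shows something sensible.
    return DIFFUSECRAFT_CHECKPOINT_NAME.get(checkpoint_key.lower(), checkpoint_key)

assert model_type_label("flux-dev") == "FLUX"
assert model_type_label("sd3") == "sd3"
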
dc.py CHANGED
@@ -5,9 +5,9 @@ from stablepy import (
5
  SCHEDULE_TYPE_OPTIONS,
6
  SCHEDULE_PREDICTION_TYPE_OPTIONS,
7
  check_scheduler_compatibility,
 
8
  )
9
  from constants import (
10
- PREPROCESSOR_CONTROLNET,
11
  TASK_STABLEPY,
12
  TASK_MODEL_LIST,
13
  UPSCALER_DICT_GUI,
@@ -17,6 +17,7 @@ from constants import (
17
  SDXL_TASK,
18
  MODEL_TYPE_TASK,
19
  POST_PROCESSING_SAMPLER,
 
20
 
21
  )
22
  from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
@@ -36,33 +37,33 @@ from utils import (
36
  extract_exif_data,
37
  create_mask_now,
38
  download_diffuser_repo,
39
  progress_step_bar,
40
  html_template_message,
41
  escape_html,
42
  )
 
43
  from datetime import datetime
44
  import gradio as gr
45
  import logging
46
  import diffusers
47
  import warnings
48
  from stablepy import logger
 
49
  # import urllib.parse
50
 
51
  ImageFile.LOAD_TRUNCATED_IMAGES = True
 
52
  # os.environ["PYTORCH_NO_CUDA_MEMORY_CACHING"] = "1"
53
  print(os.getenv("SPACES_ZERO_GPU"))
54
 
55
  ## BEGIN MOD
56
- import gradio as gr
57
- import logging
58
  logging.getLogger("diffusers").setLevel(logging.ERROR)
59
- import diffusers
60
  diffusers.utils.logging.set_verbosity(40)
61
- import warnings
62
  warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
63
  warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
64
  warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
65
- from stablepy import logger
66
  logger.setLevel(logging.DEBUG)
67
 
68
  from env import (
@@ -109,8 +110,8 @@ for url_embed in DOWNLOAD_EMBEDS:
109
 
110
  # Build list models
111
  embed_list = get_model_list(DIRECTORY_EMBEDS)
112
- model_list = get_model_list(DIRECTORY_MODELS)
113
- model_list = load_diffusers_format_model + model_list
114
 
115
  ## BEGIN MOD
116
  lora_model_list = get_lora_model_list()
@@ -128,6 +129,16 @@ def get_embed_list(pipeline_name):
128
 
129
  print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
130
 
131
  ## BEGIN MOD
132
  class GuiSD:
133
  def __init__(self, stream=True):
@@ -135,19 +146,28 @@ class GuiSD:
135
  self.status_loading = False
136
  self.sleep_loading = 4
137
  self.last_load = datetime.now()
138
 
139
- def infer_short(self, model, pipe_params, progress=gr.Progress(track_tqdm=True)):
140
- #progress(0, desc="Start inference...")
141
- images, seed, image_list, metadata = model(**pipe_params)
142
- #progress(1, desc="Inference completed.")
143
- if not isinstance(images, list): images = [images]
144
- images = save_images(images, metadata)
145
- img = []
146
- for image in images:
147
- img.append((image, None))
148
- return img
149
-
150
- def load_new_model(self, model_name, vae_model, task, progress=gr.Progress(track_tqdm=True)):
151
  vae_model = vae_model if vae_model != "None" else None
152
  model_type = get_model_type(model_name)
153
  dtype_model = torch.bfloat16 if model_type == "FLUX" else torch.float16
@@ -160,6 +180,8 @@ class GuiSD:
160
  token=True,
161
  )
162
 
163
  for i in range(68):
164
  if not self.status_loading:
165
  self.status_loading = True
@@ -197,17 +219,19 @@ class GuiSD:
197
  vae_model=vae_model,
198
  type_model_precision=dtype_model,
199
  retain_task_model_in_cache=False,
 
200
  device="cpu",
 
201
  )
 
202
  else:
203
-
204
  if self.model.base_model_id != model_name:
205
  load_now_time = datetime.now()
206
  elapsed_time = max((load_now_time - self.last_load).total_seconds(), 0)
207
 
208
- if elapsed_time <= 8:
209
  print("Waiting for the previous model's time ops...")
210
- time.sleep(8-elapsed_time)
211
 
212
  self.model.device = torch.device("cpu")
213
  self.model.load_pipe(
@@ -216,6 +240,7 @@ class GuiSD:
216
  vae_model=vae_model,
217
  type_model_precision=dtype_model,
218
  retain_task_model_in_cache=False,
 
219
  )
220
 
221
  end_time = time.time()
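
The hunk above keeps GuiSD's reload throttle: if the previous pipeline load finished only moments ago, the loader sleeps out the remainder of a minimum gap before swapping models. A standalone sketch of that idea, grounded in the removed lines (editor's illustration; the helper name is hypothetical, and the new side of this hunk, including any changed timing values, is not shown in this diff):

import time
from datetime import datetime

def wait_for_model_ops(last_load: datetime, min_gap_s: float = 8.0) -> None:
    # Sleep out whatever remains of the minimum gap since the last load.
    elapsed = max((datetime.now() - last_load).total_seconds(), 0)
    if elapsed <= min_gap_s:
        print("Waiting for the previous model's time ops...")
        time.sleep(min_gap_s - elapsed)
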
@@ -252,6 +277,10 @@ class GuiSD:
252
  lora_scale4,
253
  lora5,
254
  lora_scale5,
255
  sampler,
256
  schedule_type,
257
  schedule_prediction_type,
@@ -272,6 +301,8 @@ class GuiSD:
272
  high_threshold,
273
  value_threshold,
274
  distance_threshold,
275
  controlnet_output_scaling_in_unet,
276
  controlnet_start_threshold,
277
  controlnet_stop_threshold,
@@ -288,6 +319,9 @@ class GuiSD:
288
  hires_negative_prompt,
289
  hires_before_adetailer,
290
  hires_after_adetailer,
291
  loop_generation,
292
  leave_progress_bar,
293
  disable_progress_bar,
@@ -329,6 +363,7 @@ class GuiSD:
329
  mask_blur_b,
330
  mask_padding_b,
331
  retain_task_cache_gui,
 
332
  image_ip1,
333
  mask_ip1,
334
  model_ip1,
@@ -345,7 +380,7 @@ class GuiSD:
345
  yield info_state, gr.update(), gr.update()
346
 
347
  vae_model = vae_model if vae_model != "None" else None
348
- loras_list = [lora1, lora2, lora3, lora4, lora5]
349
  vae_msg = f"VAE: {vae_model}" if vae_model else ""
350
  msg_lora = ""
351
 
@@ -454,6 +489,8 @@ class GuiSD:
454
  "high_threshold": high_threshold,
455
  "value_threshold": value_threshold,
456
  "distance_threshold": distance_threshold,
457
  "lora_A": lora1 if lora1 != "None" else None,
458
  "lora_scale_A": lora_scale1,
459
  "lora_B": lora2 if lora2 != "None" else None,
@@ -464,6 +501,10 @@ class GuiSD:
464
  "lora_scale_D": lora_scale4,
465
  "lora_E": lora5 if lora5 != "None" else None,
466
  "lora_scale_E": lora_scale5,
467
  ## BEGIN MOD
468
  "textual_inversion": get_embed_list(self.model.class_name) if textual_inversion else [],
469
  ## END MOD
@@ -507,6 +548,8 @@ class GuiSD:
507
  "hires_sampler": hires_sampler,
508
  "hires_before_adetailer": hires_before_adetailer,
509
  "hires_after_adetailer": hires_after_adetailer,
510
  "ip_adapter_image": params_ip_img,
511
  "ip_adapter_mask": params_ip_msk,
512
  "ip_adapter_model": params_ip_model,
@@ -514,13 +557,15 @@ class GuiSD:
514
  "ip_adapter_scale": params_ip_scale,
515
  }
516
517
  self.model.device = torch.device("cuda:0")
518
- if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * 5:
519
  self.model.pipe.transformer.to(self.model.device)
520
  print("transformer to cuda")
521
 
522
- #return self.infer_short(self.model, pipe_params), info_state
523
-
524
  actual_progress = 0
525
  info_images = gr.update()
526
  for img, [seed, image_path, metadata] in self.model(**pipe_params):
@@ -545,7 +590,7 @@ class GuiSD:
545
  if msg_lora:
546
  info_images += msg_lora
547
 
548
- info_images = info_images + "<br>" + "GENERATION DATA:<br>" + escape_html(metadata[0]) + "<br>-------<br>"
549
 
550
  download_links = "<br>".join(
551
  [
@@ -580,37 +625,38 @@ def dummy_gpu():
580
 
581
 
582
  def sd_gen_generate_pipeline(*args):
583
-
584
  gpu_duration_arg = int(args[-1]) if args[-1] else 59
585
  verbose_arg = int(args[-2])
586
  load_lora_cpu = args[-3]
587
  generation_args = args[:-3]
588
  lora_list = [
589
  None if item == "None" or item == "" else item # MOD
590
- for item in [args[7], args[9], args[11], args[13], args[15]]
591
  ]
592
- lora_status = [None] * 5
593
 
594
  msg_load_lora = "Updating LoRAs in GPU..."
595
  if load_lora_cpu:
596
- msg_load_lora = "Updating LoRAs in CPU (Slow but saves GPU usage)..."
597
 
598
- if lora_list != sd_gen.model.lora_memory and lora_list != [None] * 5:
599
  yield msg_load_lora, gr.update(), gr.update()
600
 
601
  # Load lora in CPU
602
  if load_lora_cpu:
603
- lora_status = sd_gen.model.lora_merge(
604
  lora_A=lora_list[0], lora_scale_A=args[8],
605
  lora_B=lora_list[1], lora_scale_B=args[10],
606
  lora_C=lora_list[2], lora_scale_C=args[12],
607
  lora_D=lora_list[3], lora_scale_D=args[14],
608
  lora_E=lora_list[4], lora_scale_E=args[16],
609
  )
610
  print(lora_status)
611
 
612
- sampler_name = args[17]
613
- schedule_type_name = args[18]
614
  _, _, msg_sampler = check_scheduler_compatibility(
615
  sd_gen.model.class_name, sampler_name, schedule_type_name
616
  )
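
sd_gen_generate_pipeline keeps its convention of smuggling control flags at the end of *args: the last three positions are gpu_duration, verbose, and load_lora_cpu, and everything before them is forwarded to the generator. A small sketch of that unpacking, grounded in the lines above (editor's illustration; unpack_control_args is a hypothetical helper):

def unpack_control_args(*args):
    # Last three positionals are control flags; the rest go to the generator.
    gpu_duration = int(args[-1]) if args[-1] else 59   # seconds; 59 is the fallback
    verbose = int(args[-2])
    load_lora_cpu = args[-3]
    return args[:-3], load_lora_cpu, verbose, gpu_duration

gen_args, load_lora_cpu, verbose, duration = unpack_control_args("prompt", "neg", True, 0, 30)
assert duration == 30 and gen_args == ("prompt", "neg")
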
@@ -624,7 +670,7 @@ def sd_gen_generate_pipeline(*args):
624
  elif status is not None:
625
  gr.Warning(f"Failed to load LoRA: {lora}")
626
 
627
- if lora_status == [None] * 5 and sd_gen.model.lora_memory != [None] * 5 and load_lora_cpu:
628
  lora_cache_msg = ", ".join(
629
  str(x) for x in sd_gen.model.lora_memory if x is not None
630
  )
@@ -640,7 +686,6 @@ def sd_gen_generate_pipeline(*args):
640
 
641
  # yield from sd_gen.generate_pipeline(*generation_args)
642
  yield from dynamic_gpu_duration(
643
- #return dynamic_gpu_duration(
644
  sd_gen.generate_pipeline,
645
  gpu_duration_arg,
646
  *generation_args,
@@ -682,6 +727,7 @@ def esrgan_upscale(image, upscaler_name, upscaler_size):
682
  return image_path
683
 
684
685
  dynamic_gpu_duration.zerogpu = True
686
  sd_gen_generate_pipeline.zerogpu = True
687
  sd_gen = GuiSD()
@@ -694,32 +740,115 @@ import numpy as np
694
  import random
695
  import json
696
  import shutil
697
- from modutils import (safe_float, escape_lora_basename, to_lora_key, to_lora_path,
 
698
  get_local_model_list, get_private_lora_model_lists, get_valid_lora_name, get_state, set_state,
699
  get_valid_lora_path, get_valid_lora_wt, get_lora_info, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL,
700
- normalize_prompt_list, get_civitai_info, search_lora_on_civitai, translate_to_en, get_t2i_model_info, get_civitai_tag, save_image_history)
701
 
702
 
703
  #@spaces.GPU
704
  def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
705
- model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
706
- lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
707
- sampler = "Euler", vae = None, translate=True, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
708
- recom_prompt = True, progress=gr.Progress(track_tqdm=True)):
709
  MAX_SEED = np.iinfo(np.int32).max
710
 
711
  image_previews = True
712
  load_lora_cpu = False
713
  verbose_info = False
714
- gpu_duration = 59
715
- filename_pattern = "model,seed"
716
 
717
  images: list[tuple[PIL.Image.Image, str | None]] = []
718
  progress(0, desc="Preparing...")
719
 
720
- if randomize_seed:
721
- seed = random.randint(0, MAX_SEED)
722
-
723
  generator = torch.Generator().manual_seed(seed).seed()
724
 
725
  if translate:
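
This hunk removes infer's inline seed randomization (if randomize_seed: seed = random.randint(0, MAX_SEED)); its replacement lives on the new side of dc.py, which is not reproduced in this view. For reference, a sketch of the removed behavior, grounded in the deleted lines (editor's illustration; resolve_seed is a hypothetical helper):

import random
import numpy as np
import torch

MAX_SEED = np.iinfo(np.int32).max

def resolve_seed(seed: int, randomize_seed: bool) -> int:
    # Pre-commit behavior: pick a fresh 32-bit seed when requested.
    return random.randint(0, MAX_SEED) if randomize_seed else seed

seed = resolve_seed(0, randomize_seed=True)
generator = torch.Generator().manual_seed(seed)  # reproducible for a fixed seed
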
@@ -728,31 +857,38 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
728
 
729
  prompt, negative_prompt = insert_model_recom_prompt(prompt, negative_prompt, model_name, recom_prompt)
730
  progress(0.5, desc="Preparing...")
731
- lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt = \
732
- set_prompt_loras(prompt, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt)
733
  lora1 = get_valid_lora_path(lora1)
734
  lora2 = get_valid_lora_path(lora2)
735
  lora3 = get_valid_lora_path(lora3)
736
  lora4 = get_valid_lora_path(lora4)
737
  lora5 = get_valid_lora_path(lora5)
738
  progress(1, desc="Preparation completed. Starting inference...")
739
 
740
  progress(0, desc="Loading model...")
741
- for _ in sd_gen.load_new_model(model_name, vae, TASK_MODEL_LIST[0]):
742
  pass
743
  progress(1, desc="Model loaded.")
744
  progress(0, desc="Starting Inference...")
745
  for info_state, stream_images, info_images in sd_gen_generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
746
- guidance_scale, True, generator, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
747
- lora4, lora4_wt, lora5, lora5_wt, sampler, schedule_type, schedule_prediction_type,
748
- height, width, model_name, vae, TASK_MODEL_LIST[0], None, "Canny", 512, 1024,
749
- None, None, None, 0.35, 100, 200, 0.1, 0.1, 1.0, 0., 1., False, "Classic", None,
750
- 1.0, 100, 10, 30, 0.55, "Use same sampler", "", "",
751
- False, True, 1, True, False, image_previews, False, False, filename_pattern, "./images", False, False, False, True, 1, 0.55,
752
- False, False, False, True, False, "Use same sampler", False, "", "", 0.35, True, True, False, 4, 4, 32,
753
- False, "", "", 0.35, True, True, False, 4, 4, 32,
754
- True, None, None, "plus_face", "original", 0.7, None, None, "base", "style", 0.7, 0.0,
755
- load_lora_cpu, verbose_info, gpu_duration
756
  ):
757
  images = stream_images if isinstance(stream_images, list) else images
758
  progress(1, desc="Inference completed.")
@@ -763,10 +899,10 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
763
 
764
  #@spaces.GPU
765
  def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
766
- model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
767
- lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
768
- sampler = "Euler", vae = None, translate = True, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
769
- recom_prompt = True, progress=gr.Progress(track_tqdm=True)):
770
  return gr.update()
771
 
772
 
@@ -809,337 +945,6 @@ def enable_diffusers_model_detail(is_enable: bool = False, model_name: str = "",
809
  return gr.update(value=is_enable), gr.update(value=new_value, choices=get_diffusers_model_list(state)), state
810
 
811
 
812
- def load_model_prompt_dict():
813
- dict = {}
814
- try:
815
- with open('model_dict.json', encoding='utf-8') as f:
816
- dict = json.load(f)
817
- except Exception:
818
- pass
819
- return dict
820
-
821
-
822
- model_prompt_dict = load_model_prompt_dict()
823
-
824
-
825
- animagine_ps = to_list("masterpiece, best quality, very aesthetic, absurdres")
826
- animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
827
- pony_ps = to_list("score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
828
- pony_nps = to_list("source_pony, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
829
- other_ps = to_list("anime artwork, anime style, studio anime, highly detailed, cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed")
830
- other_nps = to_list("photo, deformed, black and white, realism, disfigured, low contrast, drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly")
831
- default_ps = to_list("highly detailed, masterpiece, best quality, very aesthetic, absurdres")
832
- default_nps = to_list("score_6, score_5, score_4, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
833
- def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name: str = "None", model_recom_prompt_enabled = True):
834
- if not model_recom_prompt_enabled or not model_name: return prompt, neg_prompt
835
- prompts = to_list(prompt)
836
- neg_prompts = to_list(neg_prompt)
837
- prompts = list_sub(prompts, animagine_ps + pony_ps + other_ps)
838
- neg_prompts = list_sub(neg_prompts, animagine_nps + pony_nps + other_nps)
839
- last_empty_p = [""] if not prompts and type != "None" else []
840
- last_empty_np = [""] if not neg_prompts and type != "None" else []
841
- ps = []
842
- nps = []
843
- if model_name in model_prompt_dict.keys():
844
- ps = to_list(model_prompt_dict[model_name]["prompt"])
845
- nps = to_list(model_prompt_dict[model_name]["negative_prompt"])
846
- else:
847
- ps = default_ps
848
- nps = default_nps
849
- prompts = prompts + ps
850
- neg_prompts = neg_prompts + nps
851
- prompt = ", ".join(list_uniq(prompts) + last_empty_p)
852
- neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
853
- return prompt, neg_prompt
854
-
855
-
856
- private_lora_dict = {}
857
- try:
858
- with open('lora_dict.json', encoding='utf-8') as f:
859
- d = json.load(f)
860
- for k, v in d.items():
861
- private_lora_dict[escape_lora_basename(k)] = v
862
- except Exception:
863
- pass
864
-
865
-
866
- private_lora_model_list = get_private_lora_model_lists()
867
- loras_dict = {"None": ["", "", "", "", ""], "": ["", "", "", "", ""]} | private_lora_dict.copy()
868
- loras_url_to_path_dict = {} # {"URL to download": "local filepath", ...}
869
- civitai_last_results = {} # {"URL to download": {search results}, ...}
870
- all_lora_list = []
871
-
872
-
873
- def get_all_lora_list():
874
- global all_lora_list
875
- loras = get_lora_model_list()
876
- all_lora_list = loras.copy()
877
- return loras
878
-
879
-
880
- def get_all_lora_tupled_list():
881
- global loras_dict
882
- models = get_all_lora_list()
883
- if not models: return []
884
- tupled_list = []
885
- for model in models:
886
- #if not model: continue # to avoid GUI-related bug
887
- basename = Path(model).stem
888
- key = to_lora_key(model)
889
- items = None
890
- if key in loras_dict.keys():
891
- items = loras_dict.get(key, None)
892
- else:
893
- items = get_civitai_info(model)
894
- if items != None:
895
- loras_dict[key] = items
896
- name = basename
897
- value = model
898
- if items and items[2] != "":
899
- if items[1] == "Pony":
900
- name = f"{basename} (for {items[1]}🐴, {items[2]})"
901
- else:
902
- name = f"{basename} (for {items[1]}, {items[2]})"
903
- tupled_list.append((name, value))
904
- return tupled_list
905
-
906
-
907
- def update_lora_dict(path: str):
908
- global loras_dict
909
- key = to_lora_key(path)
910
- if key in loras_dict.keys(): return
911
- items = get_civitai_info(path)
912
- if items == None: return
913
- loras_dict[key] = items
914
-
915
-
916
- def download_lora(dl_urls: str):
917
- global loras_url_to_path_dict
918
- dl_path = ""
919
- before = get_local_model_list(DIRECTORY_LORAS)
920
- urls = []
921
- for url in [url.strip() for url in dl_urls.split(',')]:
922
- local_path = f"{DIRECTORY_LORAS}/{url.split('/')[-1]}"
923
- if not Path(local_path).exists():
924
- download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY)
925
- urls.append(url)
926
- after = get_local_model_list(DIRECTORY_LORAS)
927
- new_files = list_sub(after, before)
928
- i = 0
929
- for file in new_files:
930
- path = Path(file)
931
- if path.exists():
932
- new_path = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
933
- path.resolve().rename(new_path.resolve())
934
- loras_url_to_path_dict[urls[i]] = str(new_path)
935
- update_lora_dict(str(new_path))
936
- dl_path = str(new_path)
937
- i += 1
938
- return dl_path
939
-
940
-
941
- def copy_lora(path: str, new_path: str):
942
- if path == new_path: return new_path
943
- cpath = Path(path)
944
- npath = Path(new_path)
945
- if cpath.exists():
946
- try:
947
-             shutil.copy(str(cpath.resolve()), str(npath.resolve()))
-         except Exception:
-             return None
-         update_lora_dict(str(npath))
-         return new_path
-     else:
-         return None
-
-
- def download_my_lora(dl_urls: str, lora1: str, lora2: str, lora3: str, lora4: str, lora5: str):
-     path = download_lora(dl_urls)
-     if path:
-         if not lora1 or lora1 == "None":
-             lora1 = path
-         elif not lora2 or lora2 == "None":
-             lora2 = path
-         elif not lora3 or lora3 == "None":
-             lora3 = path
-         elif not lora4 or lora4 == "None":
-             lora4 = path
-         elif not lora5 or lora5 == "None":
-             lora5 = path
-     choices = get_all_lora_tupled_list()
-     return gr.update(value=lora1, choices=choices), gr.update(value=lora2, choices=choices), gr.update(value=lora3, choices=choices),\
-         gr.update(value=lora4, choices=choices), gr.update(value=lora5, choices=choices)
-
-
- def set_prompt_loras(prompt, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
-     import re
-     lora1 = get_valid_lora_name(lora1, model_name)
-     lora2 = get_valid_lora_name(lora2, model_name)
-     lora3 = get_valid_lora_name(lora3, model_name)
-     lora4 = get_valid_lora_name(lora4, model_name)
-     lora5 = get_valid_lora_name(lora5, model_name)
-     if not "<lora" in prompt: return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
-     lora1_wt = get_valid_lora_wt(prompt, lora1, lora1_wt)
-     lora2_wt = get_valid_lora_wt(prompt, lora2, lora2_wt)
-     lora3_wt = get_valid_lora_wt(prompt, lora3, lora3_wt)
-     lora4_wt = get_valid_lora_wt(prompt, lora4, lora4_wt)
-     lora5_wt = get_valid_lora_wt(prompt, lora5, lora5_wt)
-     on1, label1, tag1, md1 = get_lora_info(lora1)
-     on2, label2, tag2, md2 = get_lora_info(lora2)
-     on3, label3, tag3, md3 = get_lora_info(lora3)
-     on4, label4, tag4, md4 = get_lora_info(lora4)
-     on5, label5, tag5, md5 = get_lora_info(lora5)
-     lora_paths = [lora1, lora2, lora3, lora4, lora5]
-     prompts = prompt.split(",") if prompt else []
-     for p in prompts:
-         p = str(p).strip()
-         if "<lora" in p:
-             result = re.findall(r'<lora:(.+?):(.+?)>', p)
-             if not result: continue
-             key = result[0][0]
-             wt = result[0][1]
-             path = to_lora_path(key)
-             if not key in loras_dict.keys() or not path:
-                 path = get_valid_lora_name(path)
-             if not path or path == "None": continue
-             if path in lora_paths:
-                 continue
-             elif not on1:
-                 lora1 = path
-                 lora_paths = [lora1, lora2, lora3, lora4, lora5]
-                 lora1_wt = safe_float(wt)
-                 on1 = True
-             elif not on2:
-                 lora2 = path
-                 lora_paths = [lora1, lora2, lora3, lora4, lora5]
-                 lora2_wt = safe_float(wt)
-                 on2 = True
-             elif not on3:
-                 lora3 = path
-                 lora_paths = [lora1, lora2, lora3, lora4, lora5]
-                 lora3_wt = safe_float(wt)
-                 on3 = True
-             elif not on4:
-                 lora4 = path
-                 lora_paths = [lora1, lora2, lora3, lora4, lora5]
-                 lora4_wt = safe_float(wt)
-                 on4, label4, tag4, md4 = get_lora_info(lora4)
-             elif not on5:
-                 lora5 = path
-                 lora_paths = [lora1, lora2, lora3, lora4, lora5]
-                 lora5_wt = safe_float(wt)
-                 on5 = True
-     return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
-
-
- def apply_lora_prompt(prompt: str, lora_info: str):
-     if lora_info == "None": return gr.update(value=prompt)
-     tags = prompt.split(",") if prompt else []
-     prompts = normalize_prompt_list(tags)
-     lora_tag = lora_info.replace("/",",")
-     lora_tags = lora_tag.split(",") if str(lora_info) != "None" else []
-     lora_prompts = normalize_prompt_list(lora_tags)
-     empty = [""]
-     prompt = ", ".join(list_uniq(prompts + lora_prompts) + empty)
-     return gr.update(value=prompt)
-
-
- def update_loras(prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
-     import re
-     on1, label1, tag1, md1 = get_lora_info(lora1)
-     on2, label2, tag2, md2 = get_lora_info(lora2)
-     on3, label3, tag3, md3 = get_lora_info(lora3)
-     on4, label4, tag4, md4 = get_lora_info(lora4)
-     on5, label5, tag5, md5 = get_lora_info(lora5)
-     lora_paths = [lora1, lora2, lora3, lora4, lora5]
-     prompts = prompt.split(",") if prompt else []
-     output_prompts = []
-     for p in prompts:
-         p = str(p).strip()
-         if "<lora" in p:
-             result = re.findall(r'<lora:(.+?):(.+?)>', p)
-             if not result: continue
-             key = result[0][0]
-             wt = result[0][1]
-             path = to_lora_path(key)
-             if not key in loras_dict.keys() or not path: continue
-             if path in lora_paths:
-                 output_prompts.append(f"<lora:{to_lora_key(path)}:{safe_float(wt):.2f}>")
-         elif p:
-             output_prompts.append(p)
-     lora_prompts = []
-     if on1: lora_prompts.append(f"<lora:{to_lora_key(lora1)}:{lora1_wt:.2f}>")
-     if on2: lora_prompts.append(f"<lora:{to_lora_key(lora2)}:{lora2_wt:.2f}>")
-     if on3: lora_prompts.append(f"<lora:{to_lora_key(lora3)}:{lora3_wt:.2f}>")
-     if on4: lora_prompts.append(f"<lora:{to_lora_key(lora4)}:{lora4_wt:.2f}>")
-     if on5: lora_prompts.append(f"<lora:{to_lora_key(lora5)}:{lora5_wt:.2f}>")
-     output_prompt = ", ".join(list_uniq(output_prompts + lora_prompts + [""]))
-     choices = get_all_lora_tupled_list()
-     return gr.update(value=output_prompt), gr.update(value=lora1, choices=choices), gr.update(value=lora1_wt),\
-         gr.update(value=tag1, label=label1, visible=on1), gr.update(visible=on1), gr.update(value=md1, visible=on1),\
-         gr.update(value=lora2, choices=choices), gr.update(value=lora2_wt),\
-         gr.update(value=tag2, label=label2, visible=on2), gr.update(visible=on2), gr.update(value=md2, visible=on2),\
-         gr.update(value=lora3, choices=choices), gr.update(value=lora3_wt),\
-         gr.update(value=tag3, label=label3, visible=on3), gr.update(visible=on3), gr.update(value=md3, visible=on3),\
-         gr.update(value=lora4, choices=choices), gr.update(value=lora4_wt),\
-         gr.update(value=tag4, label=label4, visible=on4), gr.update(visible=on4), gr.update(value=md4, visible=on4),\
-         gr.update(value=lora5, choices=choices), gr.update(value=lora5_wt),\
-         gr.update(value=tag5, label=label5, visible=on5), gr.update(visible=on5), gr.update(value=md5, visible=on5)
-
-
- def search_civitai_lora(query, base_model=[], sort=CIVITAI_SORT[0], period=CIVITAI_PERIOD[0], tag="", user="", gallery=[]):
-     global civitai_last_results, civitai_last_choices, civitai_last_gallery
-     civitai_last_choices = [("", "")]
-     civitai_last_gallery = []
-     civitai_last_results = {}
-     items = search_lora_on_civitai(query, base_model, 100, sort, period, tag, user)
-     if not items: return gr.update(choices=[("", "")], value="", visible=False),\
-         gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
-     civitai_last_results = {}
-     choices = []
-     gallery = []
-     for item in items:
-         base_model_name = "Pony🐴" if item['base_model'] == "Pony" else item['base_model']
-         name = f"{item['name']} (for {base_model_name} / By: {item['creator']} / Tags: {', '.join(item['tags'])})"
-         value = item['dl_url']
-         choices.append((name, value))
-         gallery.append((item['img_url'], name))
-         civitai_last_results[value] = item
-     if not choices: return gr.update(choices=[("", "")], value="", visible=False),\
-         gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
-     civitai_last_choices = choices
-     civitai_last_gallery = gallery
-     result = civitai_last_results.get(choices[0][1], "None")
-     md = result['md'] if result else ""
-     return gr.update(choices=choices, value=choices[0][1], visible=True), gr.update(value=md, visible=True),\
-         gr.update(visible=True), gr.update(visible=True), gr.update(value=gallery)
-
-
- def update_civitai_selection(evt: gr.SelectData):
-     try:
-         selected_index = evt.index
-         selected = civitai_last_choices[selected_index][1]
-         return gr.update(value=selected)
-     except Exception:
-         return gr.update(visible=True)
-
-
- def select_civitai_lora(search_result):
-     if not "http" in search_result: return gr.update(value=""), gr.update(value="None", visible=True)
-     result = civitai_last_results.get(search_result, "None")
-     md = result['md'] if result else ""
-     return gr.update(value=search_result), gr.update(value=md, visible=True)
-
-
- def search_civitai_lora_json(query, base_model):
-     results = {}
-     items = search_lora_on_civitai(query, base_model)
-     if not items: return gr.update(value=results)
-     for item in items:
-         results[item['dl_url']] = item
-     return gr.update(value=results)
-
-
  quality_prompt_list = [
      {
          "name": "None",
 
      SCHEDULE_TYPE_OPTIONS,
      SCHEDULE_PREDICTION_TYPE_OPTIONS,
      check_scheduler_compatibility,
+     TASK_AND_PREPROCESSORS,
  )
  from constants import (

      TASK_STABLEPY,
      TASK_MODEL_LIST,
      UPSCALER_DICT_GUI,

      SDXL_TASK,
      MODEL_TYPE_TASK,
      POST_PROCESSING_SAMPLER,
+     DIFFUSERS_CONTROLNET_MODEL,

  )
  from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES

      extract_exif_data,
      create_mask_now,
      download_diffuser_repo,
+     get_used_storage_gb,
+     delete_model,
      progress_step_bar,
      html_template_message,
      escape_html,
  )
+ from image_processor import preprocessor_tab
  from datetime import datetime
  import gradio as gr
  import logging
  import diffusers
  import warnings
  from stablepy import logger
+ from diffusers import FluxPipeline
  # import urllib.parse

  ImageFile.LOAD_TRUNCATED_IMAGES = True
+ torch.backends.cuda.matmul.allow_tf32 = True
  # os.environ["PYTORCH_NO_CUDA_MEMORY_CACHING"] = "1"
  print(os.getenv("SPACES_ZERO_GPU"))

  ## BEGIN MOD
  logging.getLogger("diffusers").setLevel(logging.ERROR)
  diffusers.utils.logging.set_verbosity(40)
  warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
  warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
  warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
  logger.setLevel(logging.DEBUG)

  from env import (

  # Build list models
  embed_list = get_model_list(DIRECTORY_EMBEDS)
+ single_file_model_list = get_model_list(DIRECTORY_MODELS)
+ model_list = list_uniq(get_model_id_list() + LOAD_DIFFUSERS_FORMAT_MODEL + single_file_model_list)

  ## BEGIN MOD
  lora_model_list = get_lora_model_list()

  print('\033[33m🏁 Download and listing of valid models completed.\033[0m')

+ flux_repo = "camenduru/FLUX.1-dev-diffusers"
+ flux_pipe = FluxPipeline.from_pretrained(
+     flux_repo,
+     transformer=None,
+     torch_dtype=torch.bfloat16,
+ ).to("cuda")
+ components = flux_pipe.components
+ components.pop("transformer", None)
+ delete_model(flux_repo)
+
  ## BEGIN MOD
143
  class GuiSD:
144
  def __init__(self, stream=True):
 
146
  self.status_loading = False
147
  self.sleep_loading = 4
148
  self.last_load = datetime.now()
149
+ self.inventory = []
150
+
151
+ def update_storage_models(self, storage_floor_gb=32, required_inventory_for_purge=3):
152
+ while get_used_storage_gb() > storage_floor_gb:
153
+ if len(self.inventory) < required_inventory_for_purge:
154
+ break
155
+ removal_candidate = self.inventory.pop(0)
156
+ delete_model(removal_candidate)
157
+
158
+ def update_inventory(self, model_name):
159
+ if model_name not in single_file_model_list:
160
+ self.inventory = [
161
+ m for m in self.inventory if m != model_name
162
+ ] + [model_name]
163
+ print(self.inventory)
164
+
165
+ def load_new_model(self, model_name, vae_model, task, controlnet_model, progress=gr.Progress(track_tqdm=True)):
166
+
167
+ # download link model > model_name
168
+
169
+ self.update_storage_models()
170
 
 
 
 
 
 
 
 
 
 
 
 
 
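update_storage_models treats self.inventory as an LRU queue: update_inventory appends the newest checkpoint, and the purge loop pops from the front until disk usage falls under the floor. A standalone sketch of that policy with the storage helpers stubbed out (the real get_used_storage_gb and delete_model live in utils.py):

def purge_to_floor(inventory, used_gb, storage_floor_gb=32,
                   required_inventory_for_purge=3, approx_model_gb=7):
    # approx_model_gb stands in for re-reading disk usage after each delete
    while used_gb > storage_floor_gb:
        if len(inventory) < required_inventory_for_purge:
            break  # stop evicting once fewer than three models are tracked
        evicted = inventory.pop(0)  # front of the list is the least recently used
        used_gb -= approx_model_gb
        print(f"evicted {evicted}, ~{used_gb} GB used")
    return inventory

purge_to_floor(["m1", "m2", "m3", "m4", "m5"], used_gb=50)  # evicts m1, m2, m3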
          vae_model = vae_model if vae_model != "None" else None
          model_type = get_model_type(model_name)
          dtype_model = torch.bfloat16 if model_type == "FLUX" else torch.float16

              token=True,
          )

+         self.update_inventory(model_name)
+
          for i in range(68):
              if not self.status_loading:
                  self.status_loading = True

                  vae_model=vae_model,
                  type_model_precision=dtype_model,
                  retain_task_model_in_cache=False,
+                 controlnet_model=controlnet_model,
                  device="cpu",
+                 env_components=components,
              )
+             self.model.advanced_params(image_preprocessor_cuda_active=True)
          else:
              if self.model.base_model_id != model_name:
                  load_now_time = datetime.now()
                  elapsed_time = max((load_now_time - self.last_load).total_seconds(), 0)

+                 if elapsed_time <= 9:
                      print("Waiting for the previous model's time ops...")
+                     time.sleep(9 - elapsed_time)

                  self.model.device = torch.device("cpu")
                  self.model.load_pipe(

                      vae_model=vae_model,
                      type_model_precision=dtype_model,
                      retain_task_model_in_cache=False,
+                     controlnet_model=controlnet_model,
                  )

          end_time = time.time()
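The elapsed-time check above spaces out successive pipeline swaps; a minimal sketch of the same cooldown, assuming the 9-second minimum interval used in the diff:

import time
from datetime import datetime

last_load = datetime.now()

def wait_for_cooldown(min_interval_s=9):
    elapsed = max((datetime.now() - last_load).total_seconds(), 0)
    if elapsed <= min_interval_s:
        time.sleep(min_interval_s - elapsed)  # let the previous model finish its teardown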
 
          lora_scale4,
          lora5,
          lora_scale5,
+         lora6,
+         lora_scale6,
+         lora7,
+         lora_scale7,
          sampler,
          schedule_type,
          schedule_prediction_type,

          high_threshold,
          value_threshold,
          distance_threshold,
+         recolor_gamma_correction,
+         tile_blur_sigma,
          controlnet_output_scaling_in_unet,
          controlnet_start_threshold,
          controlnet_stop_threshold,

          hires_negative_prompt,
          hires_before_adetailer,
          hires_after_adetailer,
+         hires_schedule_type,
+         hires_guidance_scale,
+         controlnet_model,
          loop_generation,
          leave_progress_bar,
          disable_progress_bar,

          mask_blur_b,
          mask_padding_b,
          retain_task_cache_gui,
+         guidance_rescale,
          image_ip1,
          mask_ip1,
          model_ip1,

          yield info_state, gr.update(), gr.update()

          vae_model = vae_model if vae_model != "None" else None
+         loras_list = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
          vae_msg = f"VAE: {vae_model}" if vae_model else ""
          msg_lora = ""

            "high_threshold": high_threshold,
            "value_threshold": value_threshold,
            "distance_threshold": distance_threshold,
+           "recolor_gamma_correction": float(recolor_gamma_correction),
+           "tile_blur_sigma": int(tile_blur_sigma),
            "lora_A": lora1 if lora1 != "None" else None,
            "lora_scale_A": lora_scale1,
            "lora_B": lora2 if lora2 != "None" else None,

            "lora_scale_D": lora_scale4,
            "lora_E": lora5 if lora5 != "None" else None,
            "lora_scale_E": lora_scale5,
+           "lora_F": lora6 if lora6 != "None" else None,
+           "lora_scale_F": lora_scale6,
+           "lora_G": lora7 if lora7 != "None" else None,
+           "lora_scale_G": lora_scale7,
            ## BEGIN MOD
            "textual_inversion": get_embed_list(self.model.class_name) if textual_inversion else [],
            ## END MOD

            "hires_sampler": hires_sampler,
            "hires_before_adetailer": hires_before_adetailer,
            "hires_after_adetailer": hires_after_adetailer,
+           "hires_schedule_type": hires_schedule_type,
+           "hires_guidance_scale": hires_guidance_scale,
            "ip_adapter_image": params_ip_img,
            "ip_adapter_mask": params_ip_msk,
            "ip_adapter_model": params_ip_model,

            "ip_adapter_scale": params_ip_scale,
        }

+         # kwargs for diffusers pipeline
+         if guidance_rescale:
+             pipe_params["guidance_rescale"] = guidance_rescale
+
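guidance_rescale is only added to pipe_params when it is non-zero, so pipelines that do not accept the kwarg are left untouched; the same opt-in pattern in isolation:

pipe_kwargs = {"guidance_scale": 7.0}
guidance_rescale = 0.7  # 0.0 would mean "feature off"
if guidance_rescale:
    pipe_kwargs["guidance_rescale"] = guidance_rescale
print(pipe_kwargs)  # {'guidance_scale': 7.0, 'guidance_rescale': 0.7}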
          self.model.device = torch.device("cuda:0")
+         if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * self.model.num_loras:
              self.model.pipe.transformer.to(self.model.device)
              print("transformer to cuda")

          actual_progress = 0
          info_images = gr.update()
          for img, [seed, image_path, metadata] in self.model(**pipe_params):

              if msg_lora:
                  info_images += msg_lora

+             info_images = info_images + "<br>" + "GENERATION DATA:<br>" + escape_html(metadata[-1]) + "<br>-------<br>"

              download_links = "<br>".join(
                  [
 

  def sd_gen_generate_pipeline(*args):
      gpu_duration_arg = int(args[-1]) if args[-1] else 59
      verbose_arg = int(args[-2])
      load_lora_cpu = args[-3]
      generation_args = args[:-3]
      lora_list = [
          None if item == "None" or item == "" else item  # MOD
+         for item in [args[7], args[9], args[11], args[13], args[15], args[17], args[19]]
      ]
+     lora_status = [None] * sd_gen.model.num_loras

      msg_load_lora = "Updating LoRAs in GPU..."
      if load_lora_cpu:
+         msg_load_lora = "Updating LoRAs in CPU..."

+     if lora_list != sd_gen.model.lora_memory and lora_list != [None] * sd_gen.model.num_loras:
          yield msg_load_lora, gr.update(), gr.update()

      # Load lora in CPU
      if load_lora_cpu:
+         lora_status = sd_gen.model.load_lora_on_the_fly(
              lora_A=lora_list[0], lora_scale_A=args[8],
              lora_B=lora_list[1], lora_scale_B=args[10],
              lora_C=lora_list[2], lora_scale_C=args[12],
              lora_D=lora_list[3], lora_scale_D=args[14],
              lora_E=lora_list[4], lora_scale_E=args[16],
+             lora_F=lora_list[5], lora_scale_F=args[18],
+             lora_G=lora_list[6], lora_scale_G=args[20],
          )
          print(lora_status)

+     sampler_name = args[21]
+     schedule_type_name = args[22]
      _, _, msg_sampler = check_scheduler_compatibility(
          sd_gen.model.class_name, sampler_name, schedule_type_name
      )

          elif status is not None:
              gr.Warning(f"Failed to load LoRA: {lora}")

+     if lora_status == [None] * sd_gen.model.num_loras and sd_gen.model.lora_memory != [None] * sd_gen.model.num_loras and load_lora_cpu:
          lora_cache_msg = ", ".join(
              str(x) for x in sd_gen.model.lora_memory if x is not None
          )

      # yield from sd_gen.generate_pipeline(*generation_args)
      yield from dynamic_gpu_duration(
          sd_gen.generate_pipeline,
          gpu_duration_arg,
          *generation_args,
727
  return image_path
728
 
729
 
730
+ # https://huggingface.co/spaces/BestWishYsh/ConsisID-preview-Space/discussions/1#674969a022b99c122af5d407
731
  dynamic_gpu_duration.zerogpu = True
732
  sd_gen_generate_pipeline.zerogpu = True
733
  sd_gen = GuiSD()
 
740
  import random
741
  import json
742
  import shutil
743
+ from tagger.tagger import insert_model_recom_prompt
744
+ from modutils import (safe_float, escape_lora_basename, to_lora_key, to_lora_path, valid_model_name,
745
  get_local_model_list, get_private_lora_model_lists, get_valid_lora_name, get_state, set_state,
746
  get_valid_lora_path, get_valid_lora_wt, get_lora_info, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL,
747
+ normalize_prompt_list, get_civitai_info, search_lora_on_civitai, translate_to_en, get_t2i_model_info, get_civitai_tag, save_image_history,
748
+ get_all_lora_list, get_all_lora_tupled_list, update_lora_dict, download_lora, copy_lora, download_my_lora, set_prompt_loras,
749
+ apply_lora_prompt, update_loras, search_civitai_lora, search_civitai_lora_json, update_civitai_selection, select_civitai_lora)
750
 
751
 
752
  #@spaces.GPU
753
  def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
754
+ model_name=load_diffusers_format_model[0], lora1=None, lora1_wt=1.0, lora2=None, lora2_wt=1.0,
755
+ lora3=None, lora3_wt=1.0, lora4=None, lora4_wt=1.0, lora5=None, lora5_wt=1.0, lora6=None, lora6_wt=1.0, lora7=None, lora7_wt=1.0,
756
+ sampler="Euler", vae=None, translate=False, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
757
+ clip_skip=True, pag_scale=0.0, free_u=False, guidance_rescale=0., gpu_duration=59, recom_prompt=True, progress=gr.Progress(track_tqdm=True)):
758
  MAX_SEED = np.iinfo(np.int32).max
759
 
760
+ task = TASK_MODEL_LIST[0]
761
+ image_control = None
762
+ preprocessor_name = "Canny"
763
+ preprocess_resolution = 512
764
+ image_resolution = 1024
765
+ style_prompt = None
766
+ style_json = None
767
+ image_mask = None
768
+ strength = 0.35
769
+ low_threshold = 100
770
+ high_threshold = 200
771
+ value_threshold = 0.1
772
+ distance_threshold = 0.1
773
+ recolor_gamma_correction = 1.
774
+ tile_blur_sigma = 9
775
+ control_net_output_scaling = 1.0
776
+ control_net_start_threshold = 0.
777
+ control_net_stop_threshold = 1.
778
+ active_textual_inversion = False
779
+ prompt_syntax = "Classic"
780
+ upscaler_model_path = None # UPSCALER_KEYS[0]
781
+ upscaler_increases_size = 1.0 # 1.2
782
+ esrgan_tile = 5
783
+ esrgan_tile_overlap = 8
784
+ hires_steps = 30
785
+ hires_denoising_strength = 0.55
786
+ hires_sampler = "Use same sampler" # POST_PROCESSING_SAMPLER[0]
787
+ hires_prompt = ""
788
+ hires_negative_prompt = ""
789
+ hires_before_adetailer = False
790
+ hires_after_adetailer = True
791
+ hires_schedule_list = ["Use same schedule type"] + SCHEDULE_TYPE_OPTIONS
792
+ hires_schedule_type = hires_schedule_list[0]
793
+ hires_guidance_scale = -1
794
+ controlnet_model = DIFFUSERS_CONTROLNET_MODEL[0]
795
+ loop_generation = 1
796
+ leave_progress_bar = True
797
+ disable_progress_bar = False
798
  image_previews = True
799
+ display_images = False
800
+ save_generated_images = False
801
+ filename_pattern = "model,seed"
802
+ image_storage_location = "./images"
803
+ retain_compel_previous_load = False
804
+ retain_detailfix_model_previous_load = False
805
+ retain_hires_model_previous_load = False
806
+ t2i_adapter_preprocessor = True
807
+ adapter_conditioning_scale = 1
808
+ adapter_conditioning_factor = 0.55
809
+ xformers_memory_efficient_attention = False
810
+ generator_in_cpu = False
811
+ adetailer_inpaint_only = True
812
+ adetailer_verbose = False
813
+ adetailer_sampler = "Use same sampler"
814
+ adetailer_active_a = False
815
+ prompt_ad_a = ""
816
+ negative_prompt_ad_a = ""
817
+ strength_ad_a = 0.35
818
+ face_detector_ad_a = True
819
+ person_detector_ad_a = True
820
+ hand_detector_ad_a = False
821
+ mask_dilation_a = 4
822
+ mask_blur_a = 4
823
+ mask_padding_a = 32
824
+ adetailer_active_b = False
825
+ prompt_ad_b = ""
826
+ negative_prompt_ad_b = ""
827
+ strength_ad_b = 0.35
828
+ face_detector_ad_b = True
829
+ person_detector_ad_b = True
830
+ hand_detector_ad_b = False
831
+ mask_dilation_b = 4
832
+ mask_blur_b = 4
833
+ mask_padding_b = 32
834
+ retain_task_cache = True
835
+ image_ip1 = None
836
+ mask_ip1 = None
837
+ model_ip1 = "plus_face"
838
+ mode_ip1 = "original"
839
+ scale_ip1 = 0.7
840
+ image_ip2 = None
841
+ mask_ip2 = None
842
+ model_ip2 = "base"
843
+ mode_ip2 = "style"
844
+ scale_ip2 = 0.7
845
  load_lora_cpu = False
846
  verbose_info = False
 
 
847
 
848
  images: list[tuple[PIL.Image.Image, str | None]] = []
849
  progress(0, desc="Preparing...")
850
 
851
+ if randomize_seed: seed = random.randint(0, MAX_SEED)
 
 
852
  generator = torch.Generator().manual_seed(seed).seed()
853
 
854
  if translate:
 
857
 
858
  prompt, negative_prompt = insert_model_recom_prompt(prompt, negative_prompt, model_name, recom_prompt)
859
  progress(0.5, desc="Preparing...")
860
+ lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt = \
861
+ set_prompt_loras(prompt, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt)
862
  lora1 = get_valid_lora_path(lora1)
863
  lora2 = get_valid_lora_path(lora2)
864
  lora3 = get_valid_lora_path(lora3)
865
  lora4 = get_valid_lora_path(lora4)
866
  lora5 = get_valid_lora_path(lora5)
867
+ lora6 = get_valid_lora_path(lora6)
868
+ lora7 = get_valid_lora_path(lora7)
869
  progress(1, desc="Preparation completed. Starting inference...")
870
 
871
  progress(0, desc="Loading model...")
872
+ for _ in sd_gen.load_new_model(valid_model_name(model_name), vae, task, controlnet_model):
873
  pass
874
  progress(1, desc="Model loaded.")
875
  progress(0, desc="Starting Inference...")
876
  for info_state, stream_images, info_images in sd_gen_generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
877
+ guidance_scale, clip_skip, generator, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
878
+ lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt, sampler, schedule_type, schedule_prediction_type,
879
+ height, width, model_name, vae, task, image_control, preprocessor_name, preprocess_resolution, image_resolution,
880
+ style_prompt, style_json, image_mask, strength, low_threshold, high_threshold, value_threshold, distance_threshold,
881
+ recolor_gamma_correction, tile_blur_sigma, control_net_output_scaling, control_net_start_threshold, control_net_stop_threshold,
882
+ active_textual_inversion, prompt_syntax, upscaler_model_path, upscaler_increases_size, esrgan_tile, esrgan_tile_overlap,
883
+ hires_steps, hires_denoising_strength, hires_sampler, hires_prompt, hires_negative_prompt, hires_before_adetailer, hires_after_adetailer,
884
+ hires_schedule_type, hires_guidance_scale, controlnet_model, loop_generation, leave_progress_bar, disable_progress_bar, image_previews,
885
+ display_images, save_generated_images, filename_pattern, image_storage_location, retain_compel_previous_load, retain_detailfix_model_previous_load,
886
+ retain_hires_model_previous_load, t2i_adapter_preprocessor, adapter_conditioning_scale, adapter_conditioning_factor, xformers_memory_efficient_attention,
887
+ free_u, generator_in_cpu, adetailer_inpaint_only, adetailer_verbose, adetailer_sampler, adetailer_active_a, prompt_ad_a, negative_prompt_ad_a,
888
+ strength_ad_a, face_detector_ad_a, person_detector_ad_a, hand_detector_ad_a, mask_dilation_a, mask_blur_a, mask_padding_a,
889
+ adetailer_active_b, prompt_ad_b, negative_prompt_ad_b, strength_ad_b, face_detector_ad_b, person_detector_ad_b, hand_detector_ad_b,
890
+ mask_dilation_b, mask_blur_b, mask_padding_b, retain_task_cache, guidance_rescale, image_ip1, mask_ip1, model_ip1, mode_ip1, scale_ip1,
891
+ image_ip2, mask_ip2, model_ip2, mode_ip2, scale_ip2, pag_scale, load_lora_cpu, verbose_info, gpu_duration
892
  ):
893
  images = stream_images if isinstance(stream_images, list) else images
894
  progress(1, desc="Inference completed.")
 
899
 
900
  #@spaces.GPU
901
  def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
902
+ model_name=load_diffusers_format_model[0], lora1=None, lora1_wt=1.0, lora2=None, lora2_wt=1.0,
903
+ lora3=None, lora3_wt=1.0, lora4=None, lora4_wt=1.0, lora5=None, lora5_wt=1.0, lora6=None, lora6_wt=1.0, lora7=None, lora7_wt=1.0,
904
+ sampler="Euler", vae=None, translate=False, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
905
+ clip_skip=True, pag_scale=0.0, free_u=False, guidance_rescale=0., gpu_duration=59, recom_prompt=True, progress=gr.Progress(track_tqdm=True)):
906
  return gr.update()
907
 
908
 
 
945
  return gr.update(value=is_enable), gr.update(value=new_value, choices=get_diffusers_model_list(state)), state
946
 
947
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
948
  quality_prompt_list = [
949
  {
950
  "name": "None",
image_processor.py ADDED
@@ -0,0 +1,130 @@
+ import spaces
+ import gradio as gr
+ from stablepy import Preprocessor
+
+ PREPROCESSOR_TASKS_LIST = [
+     "Canny",
+     "Openpose",
+     "DPT",
+     "Midas",
+     "ZoeDepth",
+     "DepthAnything",
+     "HED",
+     "PidiNet",
+     "TEED",
+     "Lineart",
+     "LineartAnime",
+     "Anyline",
+     "Lineart standard",
+     "SegFormer",
+     "UPerNet",
+     "ContentShuffle",
+     "Recolor",
+     "Blur",
+     "MLSD",
+     "NormalBae",
+ ]
+
+ preprocessor = Preprocessor()
+
+
+ def process_inputs(
+     image,
+     name,
+     resolution,
+     processor_resolution,
+     low_threshold,
+     high_threshold,
+     value_threshold,
+     distance_threshold,
+     recolor_mode,
+     recolor_gamma_correction,
+     blur_k_size,
+     pre_openpose_extra,
+     hed_scribble,
+     pre_pidinet_safe,
+     pre_lineart_coarse,
+     use_cuda,
+ ):
+     if not image:
+         raise ValueError("To use this, simply upload an image.")
+
+     preprocessor.load(name, False)
+
+     params = dict(
+         image_resolution=resolution,
+         detect_resolution=processor_resolution,
+         low_threshold=low_threshold,
+         high_threshold=high_threshold,
+         thr_v=value_threshold,
+         thr_d=distance_threshold,
+         mode=recolor_mode,
+         gamma_correction=recolor_gamma_correction,
+         blur_sigma=blur_k_size,
+         hand_and_face=pre_openpose_extra,
+         scribble=hed_scribble,
+         safe=pre_pidinet_safe,
+         coarse=pre_lineart_coarse,
+     )
+
+     if use_cuda:
+         @spaces.GPU(duration=15)
+         def wrapped_func():
+             preprocessor.to("cuda")
+             return preprocessor(image, **params)
+         return wrapped_func()
+
+     return preprocessor(image, **params)
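Defining the @spaces.GPU-decorated closure only inside the use_cuda branch means ZeroGPU time is requested solely when CUDA preprocessing is wanted; the CPU path never touches the quota. The same on-demand pattern in isolation, with a generic work function standing in for the preprocessor call:

import spaces

def run(work, use_cuda=False):
    if use_cuda:
        @spaces.GPU(duration=15)   # GPU is only attached while this call runs
        def on_gpu():
            return work("cuda")
        return on_gpu()
    return work("cpu")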
+
+
+ def preprocessor_tab():
+     with gr.Row():
+         with gr.Column():
+             pre_image = gr.Image(label="Image", type="pil", sources=["upload"])
+             pre_options = gr.Dropdown(label="Preprocessor", choices=PREPROCESSOR_TASKS_LIST, value=PREPROCESSOR_TASKS_LIST[0])
+             pre_img_resolution = gr.Slider(
+                 minimum=64, maximum=4096, step=64, value=1024, label="Image Resolution",
+                 info="The maximum proportional size of the generated image based on the uploaded image."
+             )
+             pre_start = gr.Button(value="PROCESS IMAGE", variant="primary")
+             with gr.Accordion("Advanced Settings", open=False):
+                 with gr.Column():
+                     pre_processor_resolution = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocessor Resolution")
+                     pre_low_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="'CANNY' low threshold")
+                     pre_high_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="'CANNY' high threshold")
+                     pre_value_threshold = gr.Slider(minimum=1, maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
+                     pre_distance_threshold = gr.Slider(minimum=1, maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
+                     pre_recolor_mode = gr.Dropdown(label="'RECOLOR' mode", choices=["luminance", "intensity"], value="luminance")
+                     pre_recolor_gamma_correction = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
+                     pre_blur_k_size = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'BLUR' sigma")
+                     pre_openpose_extra = gr.Checkbox(value=True, label="'OPENPOSE' face and hand")
+                     pre_hed_scribble = gr.Checkbox(value=False, label="'HED' scribble")
+                     pre_pidinet_safe = gr.Checkbox(value=False, label="'PIDINET' safe")
+                     pre_lineart_coarse = gr.Checkbox(value=False, label="'LINEART' coarse")
+                     pre_use_cuda = gr.Checkbox(value=False, label="Use CUDA")
+
+         with gr.Column():
+             pre_result = gr.Image(label="Result", type="pil", interactive=False, format="png")
+
+     pre_start.click(
+         fn=process_inputs,
+         inputs=[
+             pre_image,
+             pre_options,
+             pre_img_resolution,
+             pre_processor_resolution,
+             pre_low_threshold,
+             pre_high_threshold,
+             pre_value_threshold,
+             pre_distance_threshold,
+             pre_recolor_mode,
+             pre_recolor_gamma_correction,
+             pre_blur_k_size,
+             pre_openpose_extra,
+             pre_hed_scribble,
+             pre_pidinet_safe,
+             pre_lineart_coarse,
+             pre_use_cuda,
+         ],
+         outputs=[pre_result],
+     )
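preprocessor_tab() only declares components and wiring, so it can be mounted inside any existing Blocks layout; a minimal host app, assuming image_processor.py is on the import path:

import gradio as gr
from image_processor import preprocessor_tab

with gr.Blocks() as demo:
    with gr.Tab("Preprocessor"):
        preprocessor_tab()

demo.launch()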
llmdolphin.py CHANGED
@@ -27,6 +27,7 @@ llm_models = {
      "mn-12b-lyra-v2a1-q5_k_m.gguf": ["HalleyStarbun/MN-12B-Lyra-v2a1-Q5_K_M-GGUF", MessagesFormatterType.CHATML],
      "L3-8B-Tamamo-v1.i1-Q5_K_M.gguf": ["mradermacher/L3-8B-Tamamo-v1-i1-GGUF", MessagesFormatterType.LLAMA_3],
      "MN-Chinofun-12B-2.i1-Q4_K_M.gguf": ["mradermacher/MN-Chinofun-12B-2-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "Mistral-Nemo-Prism-12B-v2.i1-Q4_K_M.gguf": ["mradermacher/Mistral-Nemo-Prism-12B-v2-i1-GGUF", MessagesFormatterType.MISTRAL],
      "Mahou-1.5-mistral-nemo-12B.i1-Q4_K_M.gguf": ["mradermacher/Mahou-1.5-mistral-nemo-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
      "MN-12B-Mag-Mell-Q4_K_M.gguf": ["inflatebot/MN-12B-Mag-Mell-R1-GGUF", MessagesFormatterType.MISTRAL],
      "Qwen-modelstock-15B.i1-Q4_K_M.gguf": ["mradermacher/Qwen-modelstock-15B-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
@@ -51,6 +52,7 @@ llm_models = {
      "Nemo-12B-Marlin-v7.Q4_K_M.gguf": ["mradermacher/Nemo-12B-Marlin-v7-GGUF", MessagesFormatterType.MISTRAL],
      "Nemo-12B-Marlin-v8.Q4_K_S.gguf": ["mradermacher/Nemo-12B-Marlin-v8-GGUF", MessagesFormatterType.MISTRAL],
      "NemoDori-v0.2-Upscaled.1-14B.Q4_K_M.gguf": ["mradermacher/NemoDori-v0.2-Upscaled.1-14B-GGUF", MessagesFormatterType.MISTRAL],
+     "MT-Gen2-GIMMMA-gemma-2-9B.Q4_K_M.gguf": ["mradermacher/MT-Gen2-GIMMMA-gemma-2-9B-GGUF", MessagesFormatterType.ALPACA],
      "Fireball-12B-v1.0.i1-Q4_K_M.gguf": ["mradermacher/Fireball-12B-v1.0-i1-GGUF", MessagesFormatterType.MISTRAL],
      "Fireball-Mistral-Nemo-Base-2407-sft-v2.2a.Q4_K_M.gguf": ["mradermacher/Fireball-Mistral-Nemo-Base-2407-sft-v2.2a-GGUF", MessagesFormatterType.MISTRAL],
      "T-III-12B.Q4_K_M.gguf": ["mradermacher/T-III-12B-GGUF", MessagesFormatterType.CHATML],
@@ -70,12 +72,264 @@ llm_models = {
      "Rocinante-12B-v2h-Q4_K_M.gguf": ["BeaverAI/Rocinante-12B-v2h-GGUF", MessagesFormatterType.MISTRAL],
      "Mistral-Nemo-12B-ArliAI-RPMax-v1.1.i1-Q4_K_M.gguf": ["mradermacher/Mistral-Nemo-12B-ArliAI-RPMax-v1.1-i1-GGUF", MessagesFormatterType.MISTRAL],
      "Pans_Gutenbergum_V0.1.Q4_K_M.gguf": ["mradermacher/Pans_Gutenbergum_V0.1-GGUF", MessagesFormatterType.MISTRAL],
+     "AbominationScience-12B-v4.i1-Q4_K_M.gguf": ["mradermacher/AbominationScience-12B-v4-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "ChronoStar-Unleashed-v0.1.i1-Q4_K_M.gguf": ["mradermacher/ChronoStar-Unleashed-v0.1-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "Chatty-Harry_V3.0.i1-Q4_K_M.gguf": ["mradermacher/Chatty-Harry_V3.0-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "Tora-12B.i1-Q4_K_M.gguf": ["mradermacher/Tora-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "ChatML-Nemo-Pro-V2.i1-Q4_K_M.gguf": ["mradermacher/ChatML-Nemo-Pro-V2-i1-GGUF", MessagesFormatterType.MISTRAL],
      "Trinas_Nectar-8B-model_stock.i1-Q4_K_M.gguf": ["mradermacher/Trinas_Nectar-8B-model_stock-i1-GGUF", MessagesFormatterType.MISTRAL],
      "ChatWaifu_Magnum_V0.2.Q4_K_M.gguf": ["mradermacher/ChatWaifu_Magnum_V0.2-GGUF", MessagesFormatterType.MISTRAL],
      "ChatWaifu_12B_v2.0.Q5_K_M.gguf": ["mradermacher/ChatWaifu_12B_v2.0-GGUF", MessagesFormatterType.MISTRAL],
      "ChatWaifu_22B_v2.0_preview.Q4_K_S.gguf": ["mradermacher/ChatWaifu_22B_v2.0_preview-GGUF", MessagesFormatterType.MISTRAL],
      "ChatWaifu_v1.4.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.4-GGUF", MessagesFormatterType.MISTRAL],
      "ChatWaifu_v1.3.1.Q4_K_M.gguf": ["mradermacher/ChatWaifu_v1.3.1-GGUF", MessagesFormatterType.MISTRAL],
+     "Lamarck-14B-v0.2-experimental.Q4_K_M.gguf": ["mradermacher/Lamarck-14B-v0.2-experimental-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Llama3.1-Reddit-Writer-8B.Q5_K_M.gguf": ["mradermacher/Llama3.1-Reddit-Writer-8B-GGUF", MessagesFormatterType.LLAMA_3],
+     "Franken-MistressMaid-10.5B-v2.i1-Q4_K_M.gguf": ["mradermacher/Franken-MistressMaid-10.5B-v2-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "Mercury_In_Retrograde-ALT-8b-Model-Stock.i1-Q4_K_M.gguf": ["mradermacher/Mercury_In_Retrograde-ALT-8b-Model-Stock-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Virtuoso-Small.i1-Q4_K_M.gguf": ["mradermacher/Virtuoso-Small-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Tuldur-8B.Q4_K_M.gguf": ["mradermacher/Tuldur-8B-GGUF", MessagesFormatterType.LLAMA_3],
+     "Orbita-v0.1.i1-Q4_K_M.gguf": ["mradermacher/Orbita-v0.1-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Violet_Eris-BMO-12B.i1-Q4_K_M.gguf": ["mradermacher/Violet_Eris-BMO-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "Mistral-Darwin-7b-v0.1.i1-Q5_K_M.gguf": ["mradermacher/Mistral-Darwin-7b-v0.1-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "PrimaSumika-10.7B-128k.Q4_K_M.gguf": ["mradermacher/PrimaSumika-10.7B-128k-GGUF", MessagesFormatterType.MISTRAL],
+     "L3-Umbral-Mind-RP-v2-8B.i1-Q5_K_M.gguf": ["mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Arch-Function-7B.i1-Q5_K_M.gguf": ["mradermacher/Arch-Function-7B-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Llama-3-Nerdy-RP-8B.i1-Q5_K_M.gguf": ["mradermacher/Llama-3-Nerdy-RP-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "magnum-twilight-12b.i1-Q4_K_M.gguf": ["mradermacher/magnum-twilight-12b-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "Qwen2.5-Ultimate-14B-Instruct.i1-Q4_K_M.gguf": ["mradermacher/Qwen2.5-Ultimate-14B-Instruct-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "ContaLLM-Beauty-8B-Instruct.i1-Q5_K_M.gguf": ["mradermacher/ContaLLM-Beauty-8B-Instruct-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Eidolon-v3.1-14B-deconditioned.Q4_K_M.gguf": ["mradermacher/Eidolon-v3.1-14B-deconditioned-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "ZEUS-8B-V2L2.i1-Q5_K_M.gguf": ["mradermacher/ZEUS-8B-V2L2-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Rocinante-Prism_V2.0.Q4_K_M.gguf": ["mradermacher/Rocinante-Prism_V2.0-GGUF", MessagesFormatterType.MISTRAL],
+     "Rocinante-Prism_V2.1.Q4_K_M.gguf": ["mradermacher/Rocinante-Prism_V2.1-GGUF", MessagesFormatterType.MISTRAL],
+     "Virtuoso-Small-Q4_K_M.gguf": ["bartowski/Virtuoso-Small-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "ZEUS-8B-V2.i1-Q5_K_M.gguf": ["mradermacher/ZEUS-8B-V2-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Lamarck-14B-v0.1-experimental.i1-Q4_K_M.gguf": ["mradermacher/Lamarck-14B-v0.1-experimental-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "patricide-12B-Unslop-Mell.Q4_K_M.gguf": ["mradermacher/patricide-12B-Unslop-Mell-GGUF", MessagesFormatterType.MISTRAL],
+     "Eidolon-v3.1-14B.Q4_K_M.gguf": ["mradermacher/Eidolon-v3.1-14B-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Frigg-v1.4-8b-HIGH-FANTASY.Q5_K_M.gguf": ["mradermacher/Frigg-v1.4-8b-HIGH-FANTASY-GGUF", MessagesFormatterType.LLAMA_3],
+     "Thor-v1.4-8b-DARK-FICTION.i1-Q5_K_M.gguf": ["mradermacher/Thor-v1.4-8b-DARK-FICTION-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "QwenMosaic-7B.i1-Q5_K_M.gguf": ["mradermacher/QwenMosaic-7B-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Qwen2.5-7B-Spanish-0.2.i1-Q5_K_M.gguf": ["mradermacher/Qwen2.5-7B-Spanish-0.2-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "IceDrunkenCherryRP-7b-Q5_K_M.gguf": ["bartowski/IceDrunkenCherryRP-7b-GGUF", MessagesFormatterType.MISTRAL],
+     "SmolLumi-8B-Instruct.i1-Q5_K_M.gguf": ["mradermacher/SmolLumi-8B-Instruct-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Capt-Dark-Science-12B.i1-Q4_K_S.gguf": ["mradermacher/Capt-Dark-Science-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "Thor-v1.4-8b-DARK-FANTASY.i1-Q4_K_M.gguf": ["mradermacher/Thor-v1.4-8b-DARK-FANTASY-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Mayo.Q5_K_M.gguf": ["mradermacher/Mayo-GGUF", MessagesFormatterType.MISTRAL],
+     "Ella-9B.i1-Q4_K_M.gguf": ["mradermacher/Ella-9B-i1-GGUF", MessagesFormatterType.ALPACA],
+     "miscii-14b-1028-Q4_K_M.gguf": ["bartowski/miscii-14b-1028-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "SeaMarco-o1-7B-v1.Q4_K_M.gguf": ["mradermacher/SeaMarco-o1-7B-v1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "L3.1-RP-Hero-InBetween-8B.i1-Q5_K_M.gguf": ["mradermacher/L3.1-RP-Hero-InBetween-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "SeQwence-14Bv4.Q4_K_M.gguf": ["mradermacher/SeQwence-14Bv4-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "QwenStock2-14B.Q4_K_M.gguf": ["mradermacher/QwenStock2-14B-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Dark-Science-12B.i1-Q4_K_S.gguf": ["mradermacher/Dark-Science-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "Liberated-Qwen1.5-14B.Q4_K_M.gguf": ["mradermacher/Liberated-Qwen1.5-14B-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Blur-7b-slerp-v1.4.Q5_K_M.gguf": ["mradermacher/Blur-7b-slerp-v1.4-GGUF", MessagesFormatterType.MISTRAL],
+     "Llama-3-linear-8B.Q5_K_M.gguf": ["mradermacher/Llama-3-linear-8B-GGUF", MessagesFormatterType.LLAMA_3],
+     "SAINEMO-reMIX.i1-Q4_K_M.gguf": ["mradermacher/SAINEMO-reMIX-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "Blur-7b-slerp-v1.44.Q5_K_M.gguf": ["mradermacher/Blur-7b-slerp-v1.44-GGUF", MessagesFormatterType.MISTRAL],
+     "QwenStock1-14B.Q4_K_S.gguf": ["mradermacher/QwenStock1-14B-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Matryoshka-8B-LINEAR.i1-Q4_K_S.gguf": ["mradermacher/Matryoshka-8B-LINEAR-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "8b-Base-mixed-1.Q5_K_M.gguf": ["mradermacher/8b-Base-mixed-1-GGUF", MessagesFormatterType.LLAMA_3],
+     "MT-Gen3-IMM-gemma-2-9B.Q4_K_M.gguf": ["mradermacher/MT-Gen3-IMM-gemma-2-9B-GGUF", MessagesFormatterType.ALPACA],
+     "MT-Gen3-IF-gemma-2-MT4g2S5-9B.Q4_K_M.gguf": ["mradermacher/MT-Gen3-IF-gemma-2-MT4g2S5-9B-GGUF", MessagesFormatterType.ALPACA],
+     "Nemo-DPO-v11.Q4_K_M.gguf": ["mradermacher/Nemo-DPO-v11-GGUF", MessagesFormatterType.MISTRAL],
+     "SeQwence-14B-EvolMerge.i1-Q4_K_M.gguf": ["mradermacher/SeQwence-14B-EvolMerge-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Q2.5-14B-Evalternagar.Q4_K_M.gguf": ["mradermacher/Q2.5-14B-Evalternagar-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "RP-SAINEMO.i1-Q4_K_M.gguf": ["mradermacher/RP-SAINEMO-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "Freyja-v4.95-New-writer7-7b-NON-FICTION.i1-Q4_K_M.gguf": ["mradermacher/Freyja-v4.95-New-writer7-7b-NON-FICTION-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "WIP-Acacia-8B-Model_Stock.Q5_K_M.gguf": ["mradermacher/WIP-Acacia-8B-Model_Stock-GGUF", MessagesFormatterType.LLAMA_3],
+     "Freyja-v4.95-Sao10K-7b-NON-FICTION.i1-Q4_K_M.gguf": ["mradermacher/Freyja-v4.95-Sao10K-7b-NON-FICTION-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "MT-Merge2-MUB-gemma-2-9B.Q4_K_M.gguf": ["mradermacher/MT-Merge2-MUB-gemma-2-9B-GGUF", MessagesFormatterType.ALPACA],
+     "llama3.1-8b-instruct-political-subreddits.i1-Q5_K_M.gguf": ["mradermacher/llama3.1-8b-instruct-political-subreddits-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Freyja-v4.95-Undi95-7b-NON-FICTION.i1-Q5_K_M.gguf": ["mradermacher/Freyja-v4.95-Undi95-7b-NON-FICTION-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Freyja-v4.95-mix-7b-NON-FICTION.i1-Q4_K_M.gguf": ["mradermacher/Freyja-v4.95-mix-7b-NON-FICTION-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Condensed_Milk-8B-Model_Stock.Q4_K_S.gguf": ["mradermacher/Condensed_Milk-8B-Model_Stock-GGUF", MessagesFormatterType.LLAMA_3],
+     "Freyja-v4.95-maldv-7b-NON-FICTION.i1-Q4_K_S.gguf": ["mradermacher/Freyja-v4.95-maldv-7b-NON-FICTION-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Freyja-v4.95-New-writer-7b-NON-FICTION.i1-Q4_K_M.gguf": ["mradermacher/Freyja-v4.95-New-writer-7b-NON-FICTION-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Freyja-v4.95-Dark-Planet-7b-NON-FICTION.Q5_K_M.gguf": ["mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-GGUF", MessagesFormatterType.LLAMA_3],
+     "AgoraMix-14B-stock-v0.1.i1-Q4_K_M.gguf": ["mradermacher/AgoraMix-14B-stock-v0.1-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Keiana-L3-Test6.2-8B-18.Q5_K_M.gguf": ["mradermacher/Keiana-L3-Test6.2-8B-18-GGUF", MessagesFormatterType.LLAMA_3],
+     "MFANNv0.25.i1-Q5_K_M.gguf": ["mradermacher/MFANNv0.25-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Qwestion-14B.i1-Q4_K_M.gguf": ["mradermacher/Qwestion-14B-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "SeQwence-14B-EvolMergev1.i1-Q4_K_M.gguf": ["mradermacher/SeQwence-14B-EvolMergev1-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Frigg-v1.35-8b-HIGH-FANTASY-1024k.i1-Q5_K_M.gguf": ["mradermacher/Frigg-v1.35-8b-HIGH-FANTASY-1024k-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Odin-v1.0-8b-FICTION-1024k.i1-Q4_K_M.gguf": ["mradermacher/Odin-v1.0-8b-FICTION-1024k-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Marco-01-slerp6-7B.Q5_K_M.gguf": ["mradermacher/Marco-01-slerp6-7B-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Alita99-8B-LINEAR.Q5_K_M.gguf": ["mradermacher/Alita99-8B-LINEAR-GGUF", MessagesFormatterType.LLAMA_3],
+     "Thor-v1.35-8b-DARK-FANTASY-1024k.Q5_K_M.gguf": ["mradermacher/Thor-v1.35-8b-DARK-FANTASY-1024k-GGUF", MessagesFormatterType.LLAMA_3],
+     "Thor-v1.3a-8b-FANTASY-1024k.Q5_K_M.gguf": ["mradermacher/Thor-v1.3a-8b-FANTASY-1024k-GGUF", MessagesFormatterType.LLAMA_3],
+     "LemonP_ALT-8B-Model_Stock.Q5_K_M.gguf": ["mradermacher/LemonP_ALT-8B-Model_Stock-GGUF", MessagesFormatterType.LLAMA_3],
+     "Loki-v2.75-8b-EROTICA-1024k.Q5_K_M.gguf": ["mradermacher/Loki-v2.75-8b-EROTICA-1024k-GGUF", MessagesFormatterType.LLAMA_3],
+     "marco-o1-uncensored.Q5_K_M.gguf": ["mradermacher/marco-o1-uncensored-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "SeQwence-14Bv1.i1-Q4_K_M.gguf": ["mradermacher/SeQwence-14Bv1-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "MT5-Gen2-GP-gemma-2-MT1RGDv0.1-9B.Q4_K_M.gguf": ["mradermacher/MT5-Gen2-GP-gemma-2-MT1RGDv0.1-9B-GGUF", MessagesFormatterType.ALPACA],
+     "MT5-Gen2-IF-gemma-2-MT1RAv0.1-9B.Q4_K_M.gguf": ["mradermacher/MT5-Gen2-IF-gemma-2-MT1RAv0.1-9B-GGUF", MessagesFormatterType.ALPACA],
+     "SeQwence-14Bv2.i1-Q4_K_M.gguf": ["mradermacher/SeQwence-14Bv2-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "ChaiML-Nemo-DPO-V8.i1-Q4_K_M.gguf": ["mradermacher/ChaiML-Nemo-DPO-V8-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "MT4-Gen2-MAMU-gemma-2-9B.Q4_K_M.gguf": ["mradermacher/MT4-Gen2-MAMU-gemma-2-9B-GGUF", MessagesFormatterType.ALPACA],
+     "MT4-Gen2-IF-gemma-2-MT5MT1-9B.Q4_K_M.gguf": ["mradermacher/MT4-Gen2-IF-gemma-2-MT5MT1-9B-GGUF", MessagesFormatterType.ALPACA],
+     "Chronos-Prism_V1.0.i1-Q4_K_M.gguf": ["mradermacher/Chronos-Prism_V1.0-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "Marco-01-slerp5-7B.Q5_K_M.gguf": ["mradermacher/Marco-01-slerp5-7B-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "RQwen-v0.1.Q4_K_M.gguf": ["mradermacher/RQwen-v0.1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Teleut-7b.i1-Q5_K_M.gguf": ["mradermacher/Teleut-7b-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "RP-Naughty-v1.0f-8b.i1-Q5_K_M.gguf": ["mradermacher/RP-Naughty-v1.0f-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "MT4-Gen2-GBMAMU-gemma-2-9B.Q4_K_M.gguf": ["mradermacher/MT4-Gen2-GBMAMU-gemma-2-9B-GGUF", MessagesFormatterType.ALPACA],
+     "RP-Naughty-v1.1-8b.Q5_K_M.gguf": ["mradermacher/RP-Naughty-v1.1-8b-GGUF", MessagesFormatterType.LLAMA_3],
+     "RP-Naughty-v1.0e-8b.Q5_K_M.gguf": ["mradermacher/RP-Naughty-v1.0e-8b-GGUF", MessagesFormatterType.LLAMA_3],
+     "RP-Naughty-v1.1b-8b.Q5_K_M.gguf": ["mradermacher/RP-Naughty-v1.1b-8b-GGUF", MessagesFormatterType.LLAMA_3],
+     "RP-Naughty-v1.1a-8b.Q5_K_M.gguf": ["mradermacher/RP-Naughty-v1.1a-8b-GGUF", MessagesFormatterType.LLAMA_3],
+     "Marco-01-slerp4-7B.Q5_K_M.gguf": ["mradermacher/Marco-01-slerp4-7B-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "RP-Naughty-v1.0b-8b.Q5_K_M.gguf": ["mradermacher/RP-Naughty-v1.0b-8b-GGUF", MessagesFormatterType.LLAMA_3],
+     "RP-Naughty-v1.0c-8b.Q5_K_M.gguf": ["mradermacher/RP-Naughty-v1.0c-8b-GGUF", MessagesFormatterType.LLAMA_3],
+     "IceDrunkenCherryRP-7b.i1-Q5_K_M.gguf": ["mradermacher/IceDrunkenCherryRP-7b-i1-GGUF", MessagesFormatterType.ALPACA],
+     "Thor-v1.2-8b-1024k.i1-Q5_K_M.gguf": ["mradermacher/Thor-v1.2-8b-1024k-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "RP-Naughty-v1.0d-8b.Q5_K_M.gguf": ["mradermacher/RP-Naughty-v1.0d-8b-GGUF", MessagesFormatterType.LLAMA_3],
+     "Cakrawala-8B.i1-Q4_K_S.gguf": ["mradermacher/Cakrawala-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Thor-v1.1e-8b-1024k.Q5_K_M.gguf": ["mradermacher/Thor-v1.1e-8b-1024k-GGUF", MessagesFormatterType.LLAMA_3],
+     "RP-Naughty-v1.0a-8b.Q5_K_M.gguf": ["mradermacher/RP-Naughty-v1.0a-8b-GGUF", MessagesFormatterType.LLAMA_3],
+     "Qwen2.5-14B-Mixed-Instruct.Q4_K_M.gguf": ["mradermacher/Qwen2.5-14B-Mixed-Instruct-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "WestKunai-Hermes-10.7b-test.Q4_K_M.gguf": ["mradermacher/WestKunai-Hermes-10.7b-test-GGUF", MessagesFormatterType.MISTRAL],
+     "Tulu-3.1-8B-SuperNova.i1-Q4_K_M.gguf": ["mradermacher/Tulu-3.1-8B-SuperNova-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Loki-v2.6-8b-1024k.i1-Q5_K_M.gguf": ["mradermacher/Loki-v2.6-8b-1024k-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Kosmos-8B-v1.i1-Q5_K_M.gguf": ["mradermacher/Kosmos-8B-v1-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Qwen2.5-7B-Instruct-DPO-v01.Q5_K_M.gguf": ["mradermacher/Qwen2.5-7B-Instruct-DPO-v01-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "HomerCreativeAnvita-Mix-Qw7B.i1-Q5_K_M.gguf": ["mradermacher/HomerCreativeAnvita-Mix-Qw7B-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "EVA-Tissint-v1.2-14B.i1-Q4_K_M.gguf": ["mradermacher/EVA-Tissint-v1.2-14B-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Llama-3.1-Tulu-3-8B-abliterated.i1-Q5_K_M.gguf": ["mradermacher/Llama-3.1-Tulu-3-8B-abliterated-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Mistral-Nemo-12B-ArliAI-RPMax-v1.2.i1-Q4_K_M.gguf": ["mradermacher/Mistral-Nemo-12B-ArliAI-RPMax-v1.2-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "EVA-Tissint-v1.2-14B.i1-Q4_K_M.gguf": ["mradermacher/EVA-Tissint-v1.2-14B-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Intelligence-7.i1-Q5_K_M.gguf": ["mradermacher/Intelligence-7-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Ice0.40-20.11-RP.i1-Q5_K_M.gguf": ["mradermacher/Ice0.40-20.11-RP-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "Llama-3.1-8B-ArliAI-RPMax-v1.2.i1-Q5_K_M.gguf": ["mradermacher/Llama-3.1-8B-ArliAI-RPMax-v1.2-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "SzilviaB_DarkSlushNeuralDaredevil-8b-abliterated.i1-Q5_K_M.gguf": ["mradermacher/SzilviaB_DarkSlushNeuralDaredevil-8b-abliterated-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Qwen2.5-7B-HomerCreative-Mix.i1-Q5_K_M.gguf": ["mradermacher/Qwen2.5-7B-HomerCreative-Mix-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "MN-Instruct-2407-14.7B-BRAINSTORM-10x-FORM-3.i1-Q4_K_M.gguf": ["mradermacher/MN-Instruct-2407-14.7B-BRAINSTORM-10x-FORM-3-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "MN-Instruct-2407-13.35B-BRAINSTORM-5x-FORM-11.Q4_K_M.gguf": ["mradermacher/MN-Instruct-2407-13.35B-BRAINSTORM-5x-FORM-11-GGUF", MessagesFormatterType.MISTRAL],
+     "NeuralDarkDevil-8b-abliterated.i1-Q5_K_M.gguf": ["mradermacher/NeuralDarkDevil-8b-abliterated-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "DarkNeuralDaredevilUnholy-8b.i1-Q5_K_M.gguf": ["mradermacher/DarkNeuralDaredevilUnholy-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "DarkAuraUnholy-Uncensored-OAS-8b.i1-Q5_K_M.gguf": ["mradermacher/DarkAuraUnholy-Uncensored-OAS-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "DarkUnholyDareDevil-abliterated-8b.i1-Q5_K_M.gguf": ["mradermacher/DarkUnholyDareDevil-abliterated-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "DarkDareDevilAura-abliterated-uncensored-OAS-8b.i1-Q5_K_M.gguf": ["mradermacher/DarkDareDevilAura-abliterated-uncensored-OAS-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "DarkNeuralDareDevil-Eight-Orbs-Of-Power-8b.i1-Q5_K_M.gguf": ["mradermacher/DarkNeuralDareDevil-Eight-Orbs-Of-Power-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Mistral-NeuralDPO-v0.4.Q5_K_M.gguf": ["mradermacher/Mistral-NeuralDPO-v0.4-GGUF", MessagesFormatterType.MISTRAL],
+     "Hermes-Instruct-7B-v0.2.i1-Q5_K_M.gguf": ["mradermacher/Hermes-Instruct-7B-v0.2-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "Llama-3.1-Tulu-3-8B-DPO-Q5_K_M.gguf": ["bartowski/Llama-3.1-Tulu-3-8B-DPO-GGUF", MessagesFormatterType.LLAMA_3],
+     "Platyboros-Instruct-7B.i1-Q5_K_M.gguf": ["mradermacher/Platyboros-Instruct-7B-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "hermes-llama3-roleplay-2000-v3.i1-Q5_K_M.gguf": ["mradermacher/hermes-llama3-roleplay-2000-v3-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Hermes-Instruct-7B-100K.i1-Q5_K_M.gguf": ["mradermacher/Hermes-Instruct-7B-100K-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "SeQwence-14B.i1-Q4_K_M.gguf": ["mradermacher/SeQwence-14B-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Llama-3.1-Tulu-3-8B-Q5_K_M.gguf": ["bartowski/Llama-3.1-Tulu-3-8B-GGUF", MessagesFormatterType.LLAMA_3],
+     "Ministral-8B-Instruct-2410.Q5_K_M.gguf": ["mradermacher/Ministral-8B-Instruct-2410-GGUF", MessagesFormatterType.MISTRAL],
+     "Loki-v2.6-8b-1024k.Q4_K_M.gguf": ["mradermacher/Loki-v2.6-8b-1024k-GGUF", MessagesFormatterType.LLAMA_3],
+     "DarkUnholyPlanet-OAS-8b.i1-Q5_K_M.gguf": ["mradermacher/DarkUnholyPlanet-OAS-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Qwen2.5-7B-HomerAnvita-NerdMix.i1-Q4_K_M.gguf": ["mradermacher/Qwen2.5-7B-HomerAnvita-NerdMix-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "DarkUnholyDareDevil-8b-abliterated.i1-Q4_K_M.gguf": ["mradermacher/DarkUnholyDareDevil-8b-abliterated-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "LLama3.1-Hawkish-Theia-Fireball-8B.i1-Q5_K_M.gguf": ["mradermacher/LLama3.1-Hawkish-Theia-Fireball-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "MT3-Gen2-MU-gemma-2-GQv1-9B.Q4_K_M.gguf": ["mradermacher/MT3-Gen2-MU-gemma-2-GQv1-9B-GGUF", MessagesFormatterType.ALPACA],
+     "MT3-Gen2-GMM-gemma-2-9B.Q4_K_M.gguf": ["mradermacher/MT3-Gen2-GMM-gemma-2-9B-GGUF", MessagesFormatterType.ALPACA],
+     "Platyboros-Instruct-7B.Q5_K_M.gguf": ["mradermacher/Platyboros-Instruct-7B-GGUF", MessagesFormatterType.MISTRAL],
+     "Fuselage-8B.Q5_K_M.gguf": ["mradermacher/Fuselage-8B-GGUF", MessagesFormatterType.LLAMA_3],
+     "Kudzerk-8B.Q5_K_M.gguf": ["mradermacher/Kudzerk-8B-GGUF", MessagesFormatterType.LLAMA_3],
+     "Qwen2.5-Coder-7B-Instruct-abliterated-TIES-v2.0.i1-Q5_K_M.gguf": ["mradermacher/Qwen2.5-Coder-7B-Instruct-abliterated-TIES-v2.0-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "B-NIMITA-L3-8B-v0.02.Q5_K_M.gguf": ["mradermacher/B-NIMITA-L3-8B-v0.02-GGUF", MessagesFormatterType.LLAMA_3],
+     "L3.1-Aspire-Heart-Matrix-8B.Q5_K_M.gguf": ["mradermacher/L3.1-Aspire-Heart-Matrix-8B-GGUF", MessagesFormatterType.LLAMA_3],
+     "HomerSlerp1-7B.Q5_K_M.gguf": ["mradermacher/HomerSlerp1-7B-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "MN-Slush.i1-Q4_K_M.gguf": ["mradermacher/MN-Slush-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "HomerSlerp2-7B.i1-Q4_K_M.gguf": ["mradermacher/HomerSlerp2-7B-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "LLaMA-Mesh-Q5_K_M.gguf": ["bartowski/LLaMA-Mesh-GGUF", MessagesFormatterType.LLAMA_3],
+     "BgGPT-Gemma-2-9B-IT-v1.0.i1-Q4_K_M.gguf": ["mradermacher/BgGPT-Gemma-2-9B-IT-v1.0-i1-GGUF", MessagesFormatterType.ALPACA],
+     "Ice0.37-19.11-RP-orpo-1.i1-Q5_K_M.gguf": ["mradermacher/Ice0.37-19.11-RP-orpo-1-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "CursedMatrix-8B-v9.i1-Q5_K_M.gguf": ["mradermacher/CursedMatrix-8B-v9-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Cakrawala-8B.i1-Q5_K_M.gguf": ["mradermacher/Cakrawala-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "JerseyDevil-14b.i1-Q4_K_M.gguf": ["mradermacher/JerseyDevil-14b-i1-GGUF", MessagesFormatterType.SOLAR],
+     "Llama-3.1-Jamet-8B-MK.I.i1-Q5_K_M.gguf": ["mradermacher/Llama-3.1-Jamet-8B-MK.I-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "SeQwence-14B-v5.Q4_K_S.gguf": ["mradermacher/SeQwence-14B-v5-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "L3.1-8B-Dark-Planet-Slush.i1-Q4_K_M.gguf": ["mradermacher/L3.1-8B-Dark-Planet-Slush-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "QwenSlerp12-7B.Q5_K_M.gguf": ["mradermacher/QwenSlerp12-7B-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "qwen-carpmuscle-v0.4.Q4_K_M.gguf": ["mradermacher/qwen-carpmuscle-v0.4-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "L3.1-8B-Slush-v1.1.i1-Q4_K_M.gguf": ["mradermacher/L3.1-8B-Slush-v1.1-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Llama-3.2-8B-Instruct.Q5_K_M.gguf": ["mradermacher/Llama-3.2-8B-Instruct-GGUF", MessagesFormatterType.LLAMA_3],
+     "Homer-v0.3-Qwen2.5-7B.Q5_K_M.gguf": ["mradermacher/Homer-v0.3-Qwen2.5-7B-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Norns-Qwen2.5-7B-v0.2.i1-Q5_K_M.gguf": ["mradermacher/Norns-Qwen2.5-7B-v0.2-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "LLAMA-3_8B_Unaligned_BETA.i1-Q5_K_M.gguf": ["mradermacher/LLAMA-3_8B_Unaligned_BETA-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "SELM-Llama-3-8B-Instruct-iter-3.Q5_K_M.gguf": ["mradermacher/SELM-Llama-3-8B-Instruct-iter-3-GGUF", MessagesFormatterType.LLAMA_3],
+     "ArliAI-RPMax-v1.3-merge-8B.i1-Q4_K_M.gguf": ["mradermacher/ArliAI-RPMax-v1.3-merge-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "gemma-2-9b-it-DS-V2.Q4_K_M.gguf": ["mradermacher/gemma-2-9b-it-DS-V2-GGUF", MessagesFormatterType.ALPACA],
+     "Stheno-Hercules-3.1-8B.i1-Q5_K_M.gguf": ["mradermacher/Stheno-Hercules-3.1-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "EVA-Norns-Qwen2.5-v0.1.i1-Q5_K_M.gguf": ["mradermacher/EVA-Norns-Qwen2.5-v0.1-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Infinirc-ArliAI-RPMax-v1.3-merge-8B.i1-Q5_K_M.gguf": ["mradermacher/Infinirc-ArliAI-RPMax-v1.3-merge-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
+     "Verdandi-Qwen2.5-7B.Q5_K_M.gguf": ["mradermacher/Verdandi-Qwen2.5-7B-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Qwen2-Wukong-7B.Q5_K_M.gguf": ["mradermacher/Qwen2-Wukong-7B-GGUF", MessagesFormatterType.OPEN_CHAT],
+     "Lelantos-7B.Q5_K_M.gguf": ["mradermacher/Lelantos-7B-GGUF", MessagesFormatterType.MISTRAL],
+     "Kunoichi-7B.Q5_K_M.gguf": ["mradermacher/Kunoichi-7B-GGUF", MessagesFormatterType.MISTRAL],
+     "llama-3-cat-8b-instruct-v1.Q5_K_M.gguf": ["mradermacher/llama-3-cat-8b-instruct-v1-GGUF", MessagesFormatterType.LLAMA_3],
+     "DPOpenHermes-7B.i1-Q5_K_M.gguf": ["mradermacher/DPOpenHermes-7B-i1-GGUF", MessagesFormatterType.MISTRAL],
+     "NeuralDaredevil-8B-abliterated.Q5_K_M.gguf": ["mradermacher/NeuralDaredevil-8B-abliterated-GGUF", MessagesFormatterType.LLAMA_3],
+     "AstroSage-8B.Q5_K_M.gguf": ["mradermacher/AstroSage-8B-GGUF", MessagesFormatterType.LLAMA_3],
+     "Llama-3-8B-Theresa.Q5_K_M.gguf": ["mradermacher/Llama-3-8B-Theresa-GGUF", MessagesFormatterType.LLAMA_3],
+     "Sonya-7B.Q5_K_M.gguf": ["mradermacher/Sonya-7B-GGUF", MessagesFormatterType.MISTRAL],
+     "Hathor_Tahsin-L3-8B-v0.9.Q4_K_S.gguf": ["mradermacher/Hathor_Tahsin-L3-8B-v0.9-GGUF", MessagesFormatterType.LLAMA_3],
274
+ "Qwen-Qwen2.5-7B-Instruct-llamafied.Q5_K_M.gguf": ["mradermacher/Qwen-Qwen2.5-7B-Instruct-llamafied-GGUF", MessagesFormatterType.OPEN_CHAT],
275
+ "Ice0.34n-14.11-RP.i1-Q4_K_S.gguf": ["mradermacher/Ice0.34n-14.11-RP-i1-GGUF", MessagesFormatterType.MISTRAL],
276
+ "Qwen2.5-14B-Wernicke-SFT.Q4_K_S.gguf": ["mradermacher/Qwen2.5-14B-Wernicke-SFT-GGUF", MessagesFormatterType.OPEN_CHAT],
277
+ "Qwen2.5-7B-nerd-uncensored-v1.6.i1-Q4_K_S.gguf": ["mradermacher/Qwen2.5-7B-nerd-uncensored-v1.6-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
278
+ "ArliAI-RPMax-v1.3-merge-8B.Q4_K_S.gguf": ["mradermacher/ArliAI-RPMax-v1.3-merge-8B-GGUF", MessagesFormatterType.LLAMA_3],
279
+ "MN-Violet-Lotus-12B.Q4_K_M.gguf": ["mradermacher/MN-Violet-Lotus-12B-GGUF", MessagesFormatterType.MISTRAL],
280
+ "Qwen2.5-7B-nerd-uncensored-v1.8.Q5_K_M.gguf": ["mradermacher/Qwen2.5-7B-nerd-uncensored-v1.8-GGUF", MessagesFormatterType.OPEN_CHAT],
281
+ "ArliAI-RPMax-v1.3-merge-llama3-8B.i1-Q4_K_M.gguf": ["mradermacher/ArliAI-RPMax-v1.3-merge-llama3-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
282
+ "Llama-3.1-SuperNova-8B-Lite_TIES_with_Base.i1-Q5_K_M.gguf": ["mradermacher/Llama-3.1-SuperNova-8B-Lite_TIES_with_Base-i1-GGUF", MessagesFormatterType.LLAMA_3],
283
+ "maverick-llama3-8B.i1-Q4_K_M.gguf": ["mradermacher/maverick-llama3-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
284
+ "JSL-MedLlama-3-8B-v2.0.i1-Q5_K_M.gguf": ["mradermacher/JSL-MedLlama-3-8B-v2.0-i1-GGUF", MessagesFormatterType.LLAMA_3],
285
+ "hiwaifu-12b.i1-Q4_K_M.gguf": ["mradermacher/hiwaifu-12b-i1-GGUF", MessagesFormatterType.MISTRAL],
286
+ "samantha-1.1-westlake-7b-laser.i1-Q5_K_M.gguf": ["mradermacher/samantha-1.1-westlake-7b-laser-i1-GGUF", MessagesFormatterType.MISTRAL],
287
+ "Wukong-0.1-Mistral-7B-v0.2.i1-Q5_K_M.gguf": ["mradermacher/Wukong-0.1-Mistral-7B-v0.2-i1-GGUF", MessagesFormatterType.MISTRAL],
288
+ "InfinityRP-v1-7B.i1-Q5_K_M.gguf": ["mradermacher/InfinityRP-v1-7B-i1-GGUF", MessagesFormatterType.MISTRAL],
289
+ "L3-Luna-8B.i1-Q5_K_M.gguf": ["mradermacher/L3-Luna-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
290
+ "Awanllm-Llama-3-8B-Instruct-ORPO-v0.1.i1-Q5_K_M.gguf": ["mradermacher/Awanllm-Llama-3-8B-Instruct-ORPO-v0.1-i1-GGUF", MessagesFormatterType.LLAMA_3],
291
+ "dolphin-2.9.1-llama-3-8b.i1-Q5_K_M.gguf": ["mradermacher/dolphin-2.9.1-llama-3-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
292
+ "EVA-Tissint-14B.i1-Q4_K_M.gguf": ["mradermacher/EVA-Tissint-14B-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
293
+ "EVA-Tissint-v1.1-14B.Q4_K_M.gguf": ["mradermacher/EVA-Tissint-v1.1-14B-GGUF", MessagesFormatterType.OPEN_CHAT],
294
+ "magnum-12b-v2.5-kto.i1-Q4_K_M.gguf": ["mradermacher/magnum-12b-v2.5-kto-i1-GGUF", MessagesFormatterType.CHATML],
295
+ "L3.1-8B-Slush.i1-Q5_K_M.gguf": ["mradermacher/L3.1-8B-Slush-i1-GGUF", MessagesFormatterType.LLAMA_3],
296
+ "QandoraExp-7B-Persona.i1-Q5_K_M.gguf": ["mradermacher/QandoraExp-7B-Persona-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
297
+ "Pantheon-RP-1.6-12b-Nemo-KTO.i1-Q4_K_M.gguf": ["mradermacher/Pantheon-RP-1.6-12b-Nemo-KTO-i1-GGUF", MessagesFormatterType.CHATML],
298
+ "Unaligned-Base-8b-1024K.i1-Q5_K_M.gguf": ["mradermacher/Unaligned-Base-8b-1024K-i1-GGUF", MessagesFormatterType.LLAMA_3],
299
+ "Mistral-Nemo-Prism-12B-v6.Q4_K_M.gguf": ["mradermacher/Mistral-Nemo-Prism-12B-v6-GGUF", MessagesFormatterType.MISTRAL],
300
+ "hermes-llama3-roleplay-1000-v3.Q5_K_M.gguf": ["mradermacher/hermes-llama3-roleplay-1000-v3-GGUF", MessagesFormatterType.LLAMA_3],
301
+ "Prismatic-12b.Q4_K_M.gguf": ["mradermacher/Prismatic-12b-GGUF", MessagesFormatterType.MISTRAL],
302
+ "Qwen-2.5-Aether-SlerpFusion-7B.i1-Q5_K_M.gguf": ["mradermacher/Qwen-2.5-Aether-SlerpFusion-7B-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
303
+ "miscii-14b-1028.i1-Q4_K_M.gguf": ["mradermacher/miscii-14b-1028-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
304
+ "DarkAtom-12B-v3.i1-Q4_K_M.gguf": ["mradermacher/DarkAtom-12B-v3-i1-GGUF", MessagesFormatterType.MISTRAL],
305
+ "Mistral-Nemo-Prism-12B.i1-Q4_K_M.gguf": ["mradermacher/Mistral-Nemo-Prism-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
306
+ "QandoraExp-7B-v2.Q5_K_M.gguf": ["mradermacher/QandoraExp-7B-v2-GGUF", MessagesFormatterType.OPEN_CHAT],
307
+ "Kunocchini-7b-128k-test.Q5_K_M.gguf": ["mradermacher/Kunocchini-7b-128k-test-GGUF", MessagesFormatterType.MISTRAL],
308
+ "MT2-Gen2-BGMAMU-gemma-2-9B.Q4_K_M.gguf": ["mradermacher/MT2-Gen2-BGMAMU-gemma-2-9B-GGUF", MessagesFormatterType.ALPACA],
309
+ "dolphin-mixtral-2x7b.i1-Q4_K_M.gguf": ["mradermacher/dolphin-mixtral-2x7b-i1-GGUF", MessagesFormatterType.MISTRAL],
310
+ "NightyGurps-14b-v1.1.Q4_K_M.gguf": ["mradermacher/NightyGurps-14b-v1.1-GGUF", MessagesFormatterType.OPEN_CHAT],
311
+ "MT2-Gen2-BB-gemma-2-MTMMT5-9B.Q4_K_M.gguf": ["mradermacher/MT2-Gen2-BB-gemma-2-MTMMT5-9B-GGUF", MessagesFormatterType.ALPACA],
312
+ "MT2-Gen2-IMM-gemma-2-9B.Q4_K_M.gguf": ["mradermacher/MT2-Gen2-IMM-gemma-2-9B-GGUF", MessagesFormatterType.ALPACA],
313
+ "Unaligned-RP-Base-8b-3.Q5_K_M.gguf": ["mradermacher/Unaligned-RP-Base-8b-3-GGUF", MessagesFormatterType.LLAMA_3],
314
+ "Unaligned-RP-Base-8b-2.Q5_K_M.gguf": ["mradermacher/Unaligned-RP-Base-8b-2-GGUF", MessagesFormatterType.LLAMA_3],
315
+ "LongWriter-Qwen2.5-7B-Instruct.i1-Q5_K_M.gguf": ["mradermacher/LongWriter-Qwen2.5-7B-Instruct-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
316
+ "Everyone-LLM-7b-Base.i1-Q5_K_M.gguf": ["mradermacher/Everyone-LLM-7b-Base-i1-GGUF", MessagesFormatterType.MISTRAL],
317
+ "QwenSlerp8-7B.Q5_K_M.gguf": ["mradermacher/QwenSlerp8-7B-GGUF", MessagesFormatterType.OPEN_CHAT],
318
+ "Tess-10.7B-v1.5.Q4_K_M.gguf": ["mradermacher/Tess-10.7B-v1.5-GGUF", MessagesFormatterType.LLAMA_3],
319
+ "Rombos-Coder-V2.5-Qwen-7b.i1-Q5_K_M.gguf": ["mradermacher/Rombos-Coder-V2.5-Qwen-7b-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
320
+ "Qwen2.5-Gutenberg-Doppel-14B.Q4_K_M.gguf": ["mradermacher/Qwen2.5-Gutenberg-Doppel-14B-GGUF", MessagesFormatterType.OPEN_CHAT],
321
+ "MT-Gen2-MU-gemma-2-MT1RAv0.1t0.25-9B.Q4_K_M.gguf": ["mradermacher/MT-Gen2-MU-gemma-2-MT1RAv0.1t0.25-9B-GGUF", MessagesFormatterType.ALPACA],
322
+ "MT1-Gen2-GP-gemma-2-MT1DMv1-9B.Q4_K_M.gguf": ["mradermacher/MT1-Gen2-GP-gemma-2-MT1DMv1-9B-GGUF", MessagesFormatterType.ALPACA],
323
+ "MT1-Gen2-GMA-gemma-2-9B.Q4_K_M.gguf": ["mradermacher/MT1-Gen2-GMA-gemma-2-9B-GGUF", MessagesFormatterType.ALPACA],
324
+ "MT1-Gen2-MMMU-gemma-2-9B.Q4_K_M.gguf": ["mradermacher/MT1-Gen2-MMMU-gemma-2-9B-GGUF", MessagesFormatterType.ALPACA],
325
+ "LGRC-7B-slerp.Q5_K_M.gguf": ["mradermacher/LGRC-7B-slerp-GGUF", MessagesFormatterType.MISTRAL],
326
+ "Ice0.32-10.11-RP.Q5_K_M.gguf": ["mradermacher/Ice0.32-10.11-RP-GGUF", MessagesFormatterType.MISTRAL],
327
+ "Vecteus-v1.i1-Q5_K_M.gguf": ["mradermacher/Vecteus-v1-i1-GGUF", MessagesFormatterType.MISTRAL],
328
+ "L3.1-BaeZel-8B-Della.Q4_K_S.gguf": ["mradermacher/L3.1-BaeZel-8B-Della-GGUF", MessagesFormatterType.LLAMA_3],
329
+ "Qwen2.5-14B-Instruct-SEALONG.i1-Q4_K_M.gguf": ["mradermacher/Qwen2.5-14B-Instruct-SEALONG-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
330
+ "Qwen2.5-7B-Instruct-SEALONG.i1-Q5_K_M.gguf": ["mradermacher/Qwen2.5-7B-Instruct-SEALONG-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
331
+ "Fraken-Maid-TW-K-Slerp.i1-Q5_K_M.gguf": ["mradermacher/Fraken-Maid-TW-K-Slerp-i1-GGUF", MessagesFormatterType.MISTRAL],
332
+ "AutoTrain-Qwen-Rui-Elite.Q5_K_M.gguf": ["mradermacher/AutoTrain-Qwen-Rui-Elite-GGUF", MessagesFormatterType.OPEN_CHAT],
333
  "dolphin-2.6-mistral-7b-dpo-laser.Q4_K_S.gguf": ["mradermacher/dolphin-2.6-mistral-7b-dpo-laser-GGUF", MessagesFormatterType.MISTRAL],
334
  "Flowable-Docs-Llama-3.1-8B.Q5_K_M.gguf": ["mradermacher/Flowable-Docs-Llama-3.1-8B-GGUF", MessagesFormatterType.LLAMA_3],
335
  "slimorca-gemma2-9b-fft.Q4_K_M.gguf": ["mradermacher/slimorca-gemma2-9b-fft-GGUF", MessagesFormatterType.ALPACA],
 
1146
  "Japanese-TextGen-Kage-v0.1.2-2x7B-NSFW_iMat_Ch200_IQ4_XS.gguf": ["dddump/Japanese-TextGen-Kage-v0.1.2-2x7B-NSFW-gguf", MessagesFormatterType.VICUNA],
1147
  "ChatWaifu_v1.2.1.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.2.1-GGUF", MessagesFormatterType.MISTRAL],
1148
  "ChatWaifu_v1.1.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.1-GGUF", MessagesFormatterType.MISTRAL],
1149
+ "ChatWaifu_v1.0.i1-Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.0-i1-GGUF", MessagesFormatterType.MISTRAL],
1150
  "Ninja-V2-7B_Q4_K_M.gguf": ["Local-Novel-LLM-project/Ninja-V2-7B-GGUF", MessagesFormatterType.VICUNA],
1151
  "Yamase-12B.Q4_K_M.gguf": ["mradermacher/Yamase-12B-GGUF", MessagesFormatterType.MISTRAL],
1152
  "borea-phi-3.5-mini-instruct-common.Q5_K_M.gguf": ["keitokei1994/Borea-Phi-3.5-mini-Instruct-Common-GGUF", MessagesFormatterType.PHI_3],
modutils.py CHANGED
@@ -302,6 +302,10 @@ def safe_float(input):
     return output


+def valid_model_name(model_name: str):
+    return model_name.split(" ")[0]
+
+
 def save_images(images: list[Image.Image], metadatas: list[str]):
     from PIL import PngImagePlugin
     import uuid
@@ -566,7 +570,8 @@ private_lora_model_list = get_private_lora_model_lists()

 def get_civitai_info(path):
     global civitai_not_exists_list
-    if path in set(civitai_not_exists_list): return ["", "", "", "", ""]
+    default = ["", "", "", "", ""]
+    if path in set(civitai_not_exists_list): return default
     if not Path(path).exists(): return None
     user_agent = get_user_agent()
     headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
@@ -584,12 +589,12 @@ def get_civitai_info(path):
         r = session.get(url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
     except Exception as e:
         print(e)
-        return ["", "", "", "", ""]
+        return default
     if not r.ok: return None
     json = r.json()
     if not 'baseModel' in json:
         civitai_not_exists_list.append(path)
-        return ["", "", "", "", ""]
+        return default
     items = []
     items.append(" / ".join(json['trainedWords']))
     items.append(json['baseModel'])
@@ -690,7 +695,7 @@ def copy_lora(path: str, new_path: str):
     return None


-def download_my_lora(dl_urls: str, lora1: str, lora2: str, lora3: str, lora4: str, lora5: str):
+def download_my_lora(dl_urls: str, lora1: str, lora2: str, lora3: str, lora4: str, lora5: str, lora6: str, lora7: str):
     path = download_lora(dl_urls)
     if path:
         if not lora1 or lora1 == "None":
@@ -703,9 +708,13 @@ def download_my_lora(dl_urls: str, lora1: str, lora2: str, lora3: str, lora4: st
             lora4 = path
         elif not lora5 or lora5 == "None":
             lora5 = path
+        #elif not lora6 or lora6 == "None":
+        #    lora6 = path
+        #elif not lora7 or lora7 == "None":
+        #    lora7 = path
     choices = get_all_lora_tupled_list()
     return gr.update(value=lora1, choices=choices), gr.update(value=lora2, choices=choices), gr.update(value=lora3, choices=choices),\
-        gr.update(value=lora4, choices=choices), gr.update(value=lora5, choices=choices)
+        gr.update(value=lora4, choices=choices), gr.update(value=lora5, choices=choices), gr.update(value=lora6, choices=choices), gr.update(value=lora7, choices=choices)


 def get_valid_lora_name(query: str, model_name: str):
@@ -745,25 +754,31 @@ def get_valid_lora_wt(prompt: str, lora_path: str, lora_wt: float):
     return wt


-def set_prompt_loras(prompt, prompt_syntax, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
-    if not "Classic" in str(prompt_syntax): return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
+def set_prompt_loras(prompt, prompt_syntax, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt):
+    if not "Classic" in str(prompt_syntax): return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt
     lora1 = get_valid_lora_name(lora1, model_name)
     lora2 = get_valid_lora_name(lora2, model_name)
     lora3 = get_valid_lora_name(lora3, model_name)
     lora4 = get_valid_lora_name(lora4, model_name)
     lora5 = get_valid_lora_name(lora5, model_name)
-    if not "<lora" in prompt: return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
+    #lora6 = get_valid_lora_name(lora6, model_name)
+    #lora7 = get_valid_lora_name(lora7, model_name)
+    if not "<lora" in prompt: return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt
     lora1_wt = get_valid_lora_wt(prompt, lora1, lora1_wt)
     lora2_wt = get_valid_lora_wt(prompt, lora2, lora2_wt)
     lora3_wt = get_valid_lora_wt(prompt, lora3, lora3_wt)
     lora4_wt = get_valid_lora_wt(prompt, lora4, lora4_wt)
     lora5_wt = get_valid_lora_wt(prompt, lora5, lora5_wt)
+    #lora6_wt = get_valid_lora_wt(prompt, lora6, lora6_wt)
+    #lora7_wt = get_valid_lora_wt(prompt, lora7, lora7_wt)
     on1, label1, tag1, md1 = get_lora_info(lora1)
     on2, label2, tag2, md2 = get_lora_info(lora2)
     on3, label3, tag3, md3 = get_lora_info(lora3)
     on4, label4, tag4, md4 = get_lora_info(lora4)
     on5, label5, tag5, md5 = get_lora_info(lora5)
-    lora_paths = [lora1, lora2, lora3, lora4, lora5]
+    #on6, label6, tag6, md6 = get_lora_info(lora6)
+    #on7, label7, tag7, md7 = get_lora_info(lora7)
+    lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
     prompts = prompt.split(",") if prompt else []
     for p in prompts:
         p = str(p).strip()
@@ -773,37 +788,47 @@ def set_prompt_loras(prompt, prompt_syntax, model_name, lora1, lora1_wt, lora2,
         key = result[0][0]
         wt = result[0][1]
         path = to_lora_path(key)
-        if not key in loras_dict.keys() or not path:
+        if not key in loras_dict.keys() or not Path(path).exists():
            path = get_valid_lora_name(path)
            if not path or path == "None": continue
-        if path in lora_paths:
+        if path in lora_paths or key in lora_paths:
            continue
        elif not on1:
            lora1 = path
-           lora_paths = [lora1, lora2, lora3, lora4, lora5]
+           lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
            lora1_wt = safe_float(wt)
            on1 = True
        elif not on2:
            lora2 = path
-           lora_paths = [lora1, lora2, lora3, lora4, lora5]
+           lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
            lora2_wt = safe_float(wt)
            on2 = True
        elif not on3:
            lora3 = path
-           lora_paths = [lora1, lora2, lora3, lora4, lora5]
+           lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
            lora3_wt = safe_float(wt)
            on3 = True
        elif not on4:
            lora4 = path
-           lora_paths = [lora1, lora2, lora3, lora4, lora5]
+           lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
            lora4_wt = safe_float(wt)
-           on4, label4, tag4, md4 = get_lora_info(lora4)
+           on4 = True
        elif not on5:
            lora5 = path
-           lora_paths = [lora1, lora2, lora3, lora4, lora5]
+           lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
            lora5_wt = safe_float(wt)
            on5 = True
-    return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
+       #elif not on6:
+       #    lora6 = path
+       #    lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
+       #    lora6_wt = safe_float(wt)
+       #    on6 = True
+       #elif not on7:
+       #    lora7 = path
+       #    lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
+       #    lora7_wt = safe_float(wt)
+       #    on7 = True
+    return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt


 def get_lora_info(lora_path: str):
@@ -864,13 +889,15 @@ def apply_lora_prompt(prompt: str = "", lora_info: str = ""):
     return gr.update(value=prompt)


-def update_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
+def update_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt):
     on1, label1, tag1, md1 = get_lora_info(lora1)
     on2, label2, tag2, md2 = get_lora_info(lora2)
     on3, label3, tag3, md3 = get_lora_info(lora3)
     on4, label4, tag4, md4 = get_lora_info(lora4)
     on5, label5, tag5, md5 = get_lora_info(lora5)
-    lora_paths = [lora1, lora2, lora3, lora4, lora5]
+    on6, label6, tag6, md6 = get_lora_info(lora6)
+    on7, label7, tag7, md7 = get_lora_info(lora7)
+    lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]

     output_prompt = prompt
     if "Classic" in str(prompt_syntax):
@@ -895,6 +922,8 @@ def update_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3,
         if on3: lora_prompts.append(f"<lora:{to_lora_key(lora3)}:{lora3_wt:.2f}>")
         if on4: lora_prompts.append(f"<lora:{to_lora_key(lora4)}:{lora4_wt:.2f}>")
         if on5: lora_prompts.append(f"<lora:{to_lora_key(lora5)}:{lora5_wt:.2f}>")
+        #if on6: lora_prompts.append(f"<lora:{to_lora_key(lora6)}:{lora6_wt:.2f}>")
+        #if on7: lora_prompts.append(f"<lora:{to_lora_key(lora7)}:{lora7_wt:.2f}>")
     output_prompt = ", ".join(list_uniq(output_prompts + lora_prompts + [""]))
     choices = get_all_lora_tupled_list()

@@ -907,7 +936,11 @@ def update_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3,
         gr.update(value=lora4, choices=choices), gr.update(value=lora4_wt),\
         gr.update(value=tag4, label=label4, visible=on4), gr.update(visible=on4), gr.update(value=md4, visible=on4),\
         gr.update(value=lora5, choices=choices), gr.update(value=lora5_wt),\
-        gr.update(value=tag5, label=label5, visible=on5), gr.update(visible=on5), gr.update(value=md5, visible=on5)
+        gr.update(value=tag5, label=label5, visible=on5), gr.update(visible=on5), gr.update(value=md5, visible=on5),\
+        gr.update(value=lora6, choices=choices), gr.update(value=lora6_wt),\
+        gr.update(value=tag6, label=label6, visible=on6), gr.update(visible=on6), gr.update(value=md6, visible=on6),\
+        gr.update(value=lora7, choices=choices), gr.update(value=lora7_wt),\
+        gr.update(value=tag7, label=label7, visible=on7), gr.update(visible=on7), gr.update(value=md7, visible=on7)


 def get_my_lora(link_url, romanize):
@@ -926,7 +959,6 @@ def get_my_lora(link_url, romanize):
     path.resolve().rename(new_path.resolve())
     update_lora_dict(str(new_path))
     l_path = str(new_path)
-    new_lora_model_list = get_lora_model_list()
     new_lora_tupled_list = get_all_lora_tupled_list()
     msg_lora = "Downloaded"
     if l_name:
@@ -943,6 +975,10 @@ def get_my_lora(link_url, romanize):
         choices=new_lora_tupled_list
     ), gr.update(
         choices=new_lora_tupled_list
+    ), gr.update(
+        choices=new_lora_tupled_list
+    ), gr.update(
+        choices=new_lora_tupled_list
     ), gr.update(
         value=msg_lora
     )
@@ -975,12 +1011,19 @@ def move_file_lora(filepaths):
         choices=new_lora_tupled_list
     ), gr.update(
         choices=new_lora_tupled_list
+    ), gr.update(
+        choices=new_lora_tupled_list
+    ), gr.update(
+        choices=new_lora_tupled_list
     )


-CIVITAI_SORT = ["Highest Rated", "Most Downloaded", "Newest"]
+CIVITAI_SORT = ["Highest Rated", "Most Downloaded", "Most Liked", "Most Discussed", "Most Collected", "Most Buzz", "Newest"]
 CIVITAI_PERIOD = ["AllTime", "Year", "Month", "Week", "Day"]
-CIVITAI_BASEMODEL = ["Pony", "Illustrious", "SDXL 1.0", "SD 1.5", "Flux.1 D", "Flux.1 S"]
+CIVITAI_BASEMODEL = ["Pony", "Illustrious", "SDXL 1.0", "SD 1.5", "Flux.1 D", "Flux.1 S"] # , "SD 3.5"
+CIVITAI_TYPE = ["Checkpoint", "TextualInversion", "Hypernetwork", "AestheticGradient", "LORA", "LoCon", "DoRA",
+                "Controlnet", "Upscaler", "MotionModule", "VAE", "Poses", "Wildcards", "Workflows", "Other"]
+CIVITAI_FILETYPE = ["Model", "VAE", "Config", "Training Data"]


 def get_civitai_info(path):
@@ -1025,6 +1068,7 @@ def search_lora_on_civitai(query: str, allow_model: list[str] = ["Pony", "SDXL 1
                            sort: str = "Highest Rated", period: str = "AllTime", tag: str = "", user: str = "", page: int = 1):
     user_agent = get_user_agent()
     headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
+    if CIVITAI_API_KEY: headers['Authorization'] = f'Bearer {CIVITAI_API_KEY}'
     base_url = 'https://civitai.com/api/v1/models'
     params = {'types': ['LORA'], 'sort': sort, 'period': period, 'limit': limit, 'page': int(page), 'nsfw': 'true'}
     if query: params["query"] = query
requirements.txt CHANGED
@@ -4,8 +4,9 @@ diffusers
 invisible_watermark
 transformers
 xformers
-git+https://github.com/R3gm/stablepy.git@ed51089 # -b refactor_sampler_fix
+git+https://github.com/R3gm/stablepy.git@a9fe2dc # -b refactor_sampler_fix
 torch==2.2.0
+numpy<2
 gdown
 opencv-python
 huggingface_hub
utils.py CHANGED
@@ -7,11 +7,17 @@ from constants import (
     HF_TOKEN,
     MODEL_TYPE_CLASS,
     DIRECTORY_LORAS,
+    DIRECTORY_MODELS,
+    DIFFUSECRAFT_CHECKPOINT_NAME,
+    CACHE_HF,
+    STORAGE_ROOT,
 )
 from huggingface_hub import HfApi
+from huggingface_hub import snapshot_download
 from diffusers import DiffusionPipeline
 from huggingface_hub import model_info as model_info_data
 from diffusers.pipelines.pipeline_loading_utils import variant_compatible_siblings
+from stablepy.diffusers_vanilla.utils import checkpoint_model_type
 from pathlib import PosixPath
 from unidecode import unidecode
 import urllib.parse
@@ -19,6 +25,8 @@ import copy
 import requests
 from requests.adapters import HTTPAdapter
 from urllib3.util import Retry
+import shutil
+import subprocess

 USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'

@@ -107,7 +115,11 @@ def download_things(directory, url, hf_token="", civitai_api_key="", romanize=Fa
             print("\033[91mYou need an API key to download Civitai models.\033[0m")

         model_profile = retrieve_model_info(url)
-        if model_profile.download_url and model_profile.filename_url:
+        if (
+            model_profile is not None
+            and model_profile.download_url
+            and model_profile.filename_url
+        ):
             url = model_profile.download_url
             filename = unidecode(model_profile.filename_url) if romanize else model_profile.filename_url
         else:
@@ -262,6 +274,10 @@ def get_my_lora(link_url, romanize):
         choices=new_lora_model_list
     ), gr.update(
         choices=new_lora_model_list
+    ), gr.update(
+        choices=new_lora_model_list
+    ), gr.update(
+        choices=new_lora_model_list
     ), gr.update(
         value=msg_lora
     )
@@ -283,10 +299,15 @@ def get_model_type(repo_id: str):
     api = HfApi(token=os.environ.get("HF_TOKEN")) # if use private or gated model
     default = "SD 1.5"
     try:
-        model = api.model_info(repo_id=repo_id, timeout=5.0)
-        tags = model.tags
-        for tag in tags:
-            if tag in MODEL_TYPE_CLASS.keys(): return MODEL_TYPE_CLASS.get(tag, default)
+        if os.path.exists(repo_id):
+            tag = checkpoint_model_type(repo_id)
+            return DIFFUSECRAFT_CHECKPOINT_NAME[tag]
+        else:
+            model = api.model_info(repo_id=repo_id, timeout=5.0)
+            tags = model.tags
+            for tag in tags:
+                if tag in MODEL_TYPE_CLASS.keys(): return MODEL_TYPE_CLASS.get(tag, default)
+
     except Exception:
         return default
     return default
@@ -306,7 +327,8 @@ def restart_space(repo_id: str, factory_reboot: bool):


 def extract_exif_data(image):
-    if image is None: return ""
+    if image is None:
+        return ""

     try:
         metadata_keys = ['parameters', 'metadata', 'prompt', 'Comment']
@@ -371,17 +393,23 @@ def download_diffuser_repo(repo_name: str, model_type: str, revision: str = "mai
     if len(variant_filenames):
         variant = "fp16"

-    cached_folder = DiffusionPipeline.download(
-        pretrained_model_name=repo_name,
-        force_download=False,
-        token=token,
-        revision=revision,
-        # mirror="https://hf-mirror.com",
-        variant=variant,
-        use_safetensors=True,
-        trust_remote_code=False,
-        timeout=5.0,
-    )
+    if model_type == "FLUX":
+        cached_folder = snapshot_download(
+            repo_id=repo_name,
+            allow_patterns="transformer/*"
+        )
+    else:
+        cached_folder = DiffusionPipeline.download(
+            pretrained_model_name=repo_name,
+            force_download=False,
+            token=token,
+            revision=revision,
+            # mirror="https://hf-mirror.com",
+            variant=variant,
+            use_safetensors=True,
+            trust_remote_code=False,
+            timeout=5.0,
+        )

     if isinstance(cached_folder, PosixPath):
         cached_folder = cached_folder.as_posix()
@@ -396,6 +424,37 @@ def download_diffuser_repo(repo_name: str, model_type: str, revision: str = "mai
     return cached_folder


+def get_folder_size_gb(folder_path):
+    result = subprocess.run(["du", "-s", folder_path], capture_output=True, text=True)
+
+    total_size_kb = int(result.stdout.split()[0])
+    total_size_gb = total_size_kb / (1024 ** 2)
+
+    return total_size_gb
+
+
+def get_used_storage_gb():
+    try:
+        used_gb = get_folder_size_gb(STORAGE_ROOT)
+        print(f"Used Storage: {used_gb:.2f} GB")
+    except Exception as e:
+        used_gb = 999
+        print(f"Error while retrieving the used storage: {e}.")
+
+    return used_gb
+
+
+def delete_model(removal_candidate):
+    print(f"Removing: {removal_candidate}")
+
+    if os.path.exists(removal_candidate):
+        os.remove(removal_candidate)
+    else:
+        diffusers_model = f"{CACHE_HF}{DIRECTORY_MODELS}--{removal_candidate.replace('/', '--')}"
+        if os.path.isdir(diffusers_model):
+            shutil.rmtree(diffusers_model)
+
+
 def progress_step_bar(step, total):
     # Calculate the percentage for the progress bar width
     percentage = min(100, ((step / total) * 100))
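
The three storage helpers added above (`get_folder_size_gb`, `get_used_storage_gb`, `delete_model`) give the Space a way to measure disk usage under `STORAGE_ROOT` and evict checkpoints. A hedged sketch of how they could be combined to enforce a disk budget before a fresh download; the 45 GB threshold, the `make_room_for_download` name, and the oldest-first eviction order are assumptions, not values from this commit:

```python
def make_room_for_download(downloaded: list[str], budget_gb: float = 45.0) -> None:
    """Evict previously downloaded checkpoints until usage drops under the budget."""
    while downloaded and get_used_storage_gb() > budget_gb:
        delete_model(downloaded.pop(0))  # evict the oldest tracked entry first
```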