John6666 committed
Commit 7cf64e8
1 Parent(s): ed9da47

Upload 9 files

Files changed (5)
  1. README.md +1 -1
  2. app.py +251 -243
  3. convert_url_to_diffusers_multi_gr.py +38 -24
  4. presets.py +147 -134
  5. utils.py +21 -14
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🎨➡️🧨
  colorFrom: indigo
  colorTo: purple
  sdk: gradio
- sdk_version: 5.6.0
+ sdk_version: 5.9.1
  app_file: app.py
  pinned: false
  license: mit
app.py CHANGED
@@ -1,243 +1,251 @@
1
- import gradio as gr
2
- from convert_url_to_diffusers_multi_gr import convert_url_to_diffusers_repo, get_dtypes, FLUX_BASE_REPOS, SD35_BASE_REPOS
3
- from presets import (DEFAULT_DTYPE, schedulers, clips, t5s, sdxl_vaes, sdxl_loras, sdxl_preset_dict, sdxl_set_presets,
4
- sd15_vaes, sd15_loras, sd15_preset_dict, sd15_set_presets, flux_vaes, flux_loras, flux_preset_dict, flux_set_presets,
5
- sd35_vaes, sd35_loras, sd35_preset_dict, sd35_set_presets)
6
-
7
- css = """
8
- .title { font-size: 3em; align-items: center; text-align: center; }
9
- .info { align-items: center; text-align: center; }
10
- .block.result { margin: 1em 0; padding: 1em; box-shadow: 0 0 3px 3px #664422, 0 0 3px 2px #664422 inset; border-radius: 6px; background: #665544; }
11
- """
12
-
13
- with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", fill_width=True, css=css, delete_cache=(60, 3600)) as demo:
14
- gr.Markdown("# Download SDXL / SD 1.5 / SD 3.5 / FLUX.1 safetensors and convert to HF🤗 Diffusers format and create your repo", elem_classes="title")
15
- gr.Markdown(f"""
16
- ### ⚠️IMPORTANT NOTICE⚠️<br>
17
- It's dangerous to expose your access token or key to others.
18
- If you do use it, I recommend that you duplicate this space on your own HF account in advance.
19
- Keys and tokens could be set to **Secrets** (`HF_TOKEN`, `CIVITAI_API_KEY`) if it's placed in your own space.
20
- It saves you the trouble of typing them in.<br>
21
- It barely works in the CPU space, but larger files can be converted if duplicated on the more powerful **Zero GPU** space.
22
- In particular, conversion of FLUX.1 or SD 3.5 is almost impossible in CPU space.
23
- ### The steps are the following:
24
- 1. Paste a write-access token from [hf.co/settings/tokens](https://huggingface.co/settings/tokens).
25
- 1. Input a model download url of the Hugging Face or Civitai or other sites.
26
- 1. If you want to download a model from Civitai, paste a Civitai API Key.
27
- 1. Input your HF user ID. e.g. 'yourid'.
28
- 1. Input your new repo name. If empty, auto-complete. e.g. 'newrepo'.
29
- 1. Set the parameters. If not sure, just use the defaults.
30
- 1. Click "Submit".
31
- 1. Patiently wait until the output changes. It takes approximately 2 to 3 minutes (on SDXL models downloading from HF).
32
- """)
33
- with gr.Column():
34
- dl_url = gr.Textbox(label="URL to download", placeholder="https://huggingface.co/bluepen5805/blue_pencil-XL/blob/main/blue_pencil-XL-v7.0.0.safetensors", value="", max_lines=1)
35
- with gr.Group():
36
- with gr.Row():
37
- hf_user = gr.Textbox(label="Your HF user ID", placeholder="username", value="", max_lines=1)
38
- hf_repo = gr.Textbox(label="New repo name", placeholder="reponame", info="If empty, auto-complete", value="", max_lines=1)
39
- with gr.Row(equal_height=True):
40
- with gr.Column():
41
- hf_token = gr.Textbox(label="Your HF write token", placeholder="hf_...", value="", max_lines=1)
42
- gr.Markdown("Your token is available at [hf.co/settings/tokens](https://huggingface.co/settings/tokens).", elem_classes="info")
43
- with gr.Column():
44
- civitai_key = gr.Textbox(label="Your Civitai API Key (Optional)", info="If you download model from Civitai...", placeholder="", value="", max_lines=1)
45
- gr.Markdown("Your Civitai API key is available at [https://civitai.com/user/account](https://civitai.com/user/account).", elem_classes="info")
46
- with gr.Row():
47
- is_upload_sf = gr.Checkbox(label="Upload single safetensors file into new repo", value=False)
48
- is_private = gr.Checkbox(label="Create private repo", value=True)
49
- is_overwrite = gr.Checkbox(label="Overwrite repo", value=False)
50
- with gr.Tab("SDXL"):
51
- with gr.Group():
52
- sdxl_presets = gr.Radio(label="Presets", choices=list(sdxl_preset_dict.keys()), value=list(sdxl_preset_dict.keys())[0])
53
- sdxl_mtype = gr.Textbox(value="SDXL", visible=False)
54
- with gr.Row():
55
- sdxl_dtype = gr.Radio(label="Output data type", choices=get_dtypes(), value=DEFAULT_DTYPE)
56
- sdxl_ema = gr.Checkbox(label="Extract EMA", info="For SD 1.5", value=True, visible=False)
57
- sdxl_base_repo = gr.Dropdown(label="Base repo ID", choices=FLUX_BASE_REPOS, value=FLUX_BASE_REPOS[0], allow_custom_value=True, visible=False)
58
- with gr.Accordion("Advanced settings", open=False):
59
- with gr.Row():
60
- sdxl_vae = gr.Dropdown(label="VAE", choices=sdxl_vaes, value="", allow_custom_value=True)
61
- sdxl_clip = gr.Dropdown(label="CLIP", choices=clips, value="", allow_custom_value=True)
62
- sdxl_t5 = gr.Dropdown(label="T5", choices=t5s, value="", allow_custom_value=True, visible=False)
63
- sdxl_scheduler = gr.Dropdown(label="Scheduler (Sampler)", choices=schedulers, value="Euler a")
64
- with gr.Row():
65
- with gr.Column():
66
- sdxl_lora1 = gr.Dropdown(label="LoRA1", choices=sdxl_loras, value="", allow_custom_value=True, min_width=320)
67
- sdxl_lora1s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA1 weight scale")
68
- with gr.Column():
69
- sdxl_lora2 = gr.Dropdown(label="LoRA2", choices=sdxl_loras, value="", allow_custom_value=True, min_width=320)
70
- sdxl_lora2s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA2 weight scale")
71
- with gr.Column():
72
- sdxl_lora3 = gr.Dropdown(label="LoRA3", choices=sdxl_loras, value="", allow_custom_value=True, min_width=320)
73
- sdxl_lora3s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA3 weight scale")
74
- with gr.Column():
75
- sdxl_lora4 = gr.Dropdown(label="LoRA4", choices=sdxl_loras, value="", allow_custom_value=True, min_width=320)
76
- sdxl_lora4s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA4 weight scale")
77
- with gr.Column():
78
- sdxl_lora5 = gr.Dropdown(label="LoRA5", choices=sdxl_loras, value="", allow_custom_value=True, min_width=320)
79
- sdxl_lora5s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA5 weight scale")
80
- sdxl_run_button = gr.Button(value="Submit", variant="primary")
81
- with gr.Tab("SD 1.5"):
82
- with gr.Group():
83
- sd15_presets = gr.Radio(label="Presets", choices=list(sd15_preset_dict.keys()), value=list(sd15_preset_dict.keys())[0])
84
- sd15_mtype = gr.Textbox(value="SD 1.5", visible=False)
85
- with gr.Row():
86
- sd15_dtype = gr.Radio(label="Output data type", choices=get_dtypes(), value=DEFAULT_DTYPE)
87
- sd15_ema = gr.Checkbox(label="Extract EMA", info="For SD 1.5", value=True, visible=True)
88
- sd15_base_repo = gr.Dropdown(label="Base repo ID", choices=FLUX_BASE_REPOS, value=FLUX_BASE_REPOS[0], allow_custom_value=True, visible=False)
89
- with gr.Accordion("Advanced settings", open=False):
90
- with gr.Row():
91
- sd15_vae = gr.Dropdown(label="VAE", choices=sd15_vaes, value="", allow_custom_value=True)
92
- sd15_clip = gr.Dropdown(label="CLIP", choices=clips, value="", allow_custom_value=True)
93
- sd15_t5 = gr.Dropdown(label="T5", choices=t5s, value="", allow_custom_value=True, visible=False)
94
- sd15_scheduler = gr.Dropdown(label="Scheduler (Sampler)", choices=schedulers, value="Euler")
95
- with gr.Row():
96
- with gr.Column():
97
- sd15_lora1 = gr.Dropdown(label="LoRA1", choices=sd15_loras, value="", allow_custom_value=True, min_width=320)
98
- sd15_lora1s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA1 weight scale")
99
- with gr.Column():
100
- sd15_lora2 = gr.Dropdown(label="LoRA2", choices=sd15_loras, value="", allow_custom_value=True, min_width=320)
101
- sd15_lora2s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA2 weight scale")
102
- with gr.Column():
103
- sd15_lora3 = gr.Dropdown(label="LoRA3", choices=sd15_loras, value="", allow_custom_value=True, min_width=320)
104
- sd15_lora3s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA3 weight scale")
105
- with gr.Column():
106
- sd15_lora4 = gr.Dropdown(label="LoRA4", choices=sd15_loras, value="", allow_custom_value=True, min_width=320)
107
- sd15_lora4s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA4 weight scale")
108
- with gr.Column():
109
- sd15_lora5 = gr.Dropdown(label="LoRA5", choices=sd15_loras, value="", allow_custom_value=True, min_width=320)
110
- sd15_lora5s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA5 weight scale")
111
- sd15_run_button = gr.Button(value="Submit", variant="primary")
112
- with gr.Tab("FLUX.1"):
113
- with gr.Group():
114
- flux_presets = gr.Radio(label="Presets", choices=list(flux_preset_dict.keys()), value=list(flux_preset_dict.keys())[0])
115
- flux_mtype = gr.Textbox(value="FLUX", visible=False)
116
- with gr.Row():
117
- flux_dtype = gr.Radio(label="Output data type", choices=get_dtypes(), value="bf16")
118
- flux_ema = gr.Checkbox(label="Extract EMA", info="For SD 1.5", value=True, visible=False)
119
- flux_base_repo = gr.Dropdown(label="Base repo ID", choices=FLUX_BASE_REPOS, value=FLUX_BASE_REPOS[0], allow_custom_value=True, visible=True)
120
- with gr.Accordion("Advanced settings", open=False):
121
- with gr.Row():
122
- flux_vae = gr.Dropdown(label="VAE", choices=flux_vaes, value="", allow_custom_value=True)
123
- flux_clip = gr.Dropdown(label="CLIP", choices=clips, value="", allow_custom_value=True)
124
- flux_t5 = gr.Dropdown(label="T5", choices=t5s, value="", allow_custom_value=True)
125
- flux_scheduler = gr.Dropdown(label="Scheduler (Sampler)", choices=[""], value="", visible=False)
126
- with gr.Row():
127
- with gr.Column():
128
- flux_lora1 = gr.Dropdown(label="LoRA1", choices=flux_loras, value="", allow_custom_value=True, min_width=320)
129
- flux_lora1s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA1 weight scale")
130
- with gr.Column():
131
- flux_lora2 = gr.Dropdown(label="LoRA2", choices=flux_loras, value="", allow_custom_value=True, min_width=320)
132
- flux_lora2s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA2 weight scale")
133
- with gr.Column():
134
- flux_lora3 = gr.Dropdown(label="LoRA3", choices=flux_loras, value="", allow_custom_value=True, min_width=320)
135
- flux_lora3s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA3 weight scale")
136
- with gr.Column():
137
- flux_lora4 = gr.Dropdown(label="LoRA4", choices=flux_loras, value="", allow_custom_value=True, min_width=320)
138
- flux_lora4s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA4 weight scale")
139
- with gr.Column():
140
- flux_lora5 = gr.Dropdown(label="LoRA5", choices=flux_loras, value="", allow_custom_value=True, min_width=320)
141
- flux_lora5s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA5 weight scale")
142
- flux_run_button = gr.Button(value="Submit", variant="primary")
143
- with gr.Tab("SD 3.5"):
144
- with gr.Group():
145
- sd35_presets = gr.Radio(label="Presets", choices=list(sd35_preset_dict.keys()), value=list(sd35_preset_dict.keys())[0])
146
- sd35_mtype = gr.Textbox(value="SD 3.5", visible=False)
147
- with gr.Row():
148
- sd35_dtype = gr.Radio(label="Output data type", choices=get_dtypes(), value="bf16")
149
- sd35_ema = gr.Checkbox(label="Extract EMA", info="For SD 1.5", value=True, visible=False)
150
- sd35_base_repo = gr.Dropdown(label="Base repo ID", choices=SD35_BASE_REPOS, value=SD35_BASE_REPOS[0], allow_custom_value=True, visible=True)
151
- with gr.Accordion("Advanced settings", open=False):
152
- with gr.Row():
153
- sd35_vae = gr.Dropdown(label="VAE", choices=sd35_vaes, value="", allow_custom_value=True)
154
- sd35_clip = gr.Dropdown(label="CLIP", choices=clips, value="", allow_custom_value=True)
155
- sd35_t5 = gr.Dropdown(label="T5", choices=t5s, value="", allow_custom_value=True)
156
- sd35_scheduler = gr.Dropdown(label="Scheduler (Sampler)", choices=[""], value="", visible=False)
157
- with gr.Row():
158
- with gr.Column():
159
- sd35_lora1 = gr.Dropdown(label="LoRA1", choices=sd35_loras, value="", allow_custom_value=True, min_width=320)
160
- sd35_lora1s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA1 weight scale")
161
- with gr.Column():
162
- sd35_lora2 = gr.Dropdown(label="LoRA2", choices=sd35_loras, value="", allow_custom_value=True, min_width=320)
163
- sd35_lora2s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA2 weight scale")
164
- with gr.Column():
165
- sd35_lora3 = gr.Dropdown(label="LoRA3", choices=sd35_loras, value="", allow_custom_value=True, min_width=320)
166
- sd35_lora3s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA3 weight scale")
167
- with gr.Column():
168
- sd35_lora4 = gr.Dropdown(label="LoRA4", choices=sd35_loras, value="", allow_custom_value=True, min_width=320)
169
- sd35_lora4s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA4 weight scale")
170
- with gr.Column():
171
- sd35_lora5 = gr.Dropdown(label="LoRA5", choices=sd35_loras, value="", allow_custom_value=True, min_width=320)
172
- sd35_lora5s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA5 weight scale")
173
- sd35_run_button = gr.Button(value="Submit", variant="primary")
174
- with gr.Group():
175
- repo_urls = gr.CheckboxGroup(visible=False, choices=[], value=[])
176
- output_md = gr.Markdown(label="Output", value="<br><br>", elem_classes="result")
177
- clear_button = gr.Button(value="Clear Output", variant="secondary")
178
- gr.DuplicateButton(value="Duplicate Space")
179
-
180
- gr.on(
181
- triggers=[sdxl_run_button.click],
182
- fn=convert_url_to_diffusers_repo,
183
- inputs=[dl_url, hf_user, hf_repo, hf_token, civitai_key, is_private, is_overwrite, is_upload_sf, repo_urls,
184
- sdxl_dtype, sdxl_vae, sdxl_clip, sdxl_t5, sdxl_scheduler, sdxl_ema, sdxl_base_repo, sdxl_mtype,
185
- sdxl_lora1, sdxl_lora1s, sdxl_lora2, sdxl_lora2s, sdxl_lora3, sdxl_lora3s, sdxl_lora4, sdxl_lora4s, sdxl_lora5, sdxl_lora5s],
186
- outputs=[repo_urls, output_md],
187
- )
188
- sdxl_presets.change(
189
- fn=sdxl_set_presets,
190
- inputs=[sdxl_presets],
191
- outputs=[sdxl_dtype, sdxl_vae, sdxl_scheduler, sdxl_lora1, sdxl_lora1s, sdxl_lora2, sdxl_lora2s, sdxl_lora3, sdxl_lora3s,
192
- sdxl_lora4, sdxl_lora4s, sdxl_lora5, sdxl_lora5s],
193
- queue=False,
194
- )
195
- gr.on(
196
- triggers=[sd15_run_button.click],
197
- fn=convert_url_to_diffusers_repo,
198
- inputs=[dl_url, hf_user, hf_repo, hf_token, civitai_key, is_private, is_overwrite, is_upload_sf, repo_urls,
199
- sd15_dtype, sd15_vae, sd15_clip, sd15_t5, sd15_scheduler, sd15_ema, sd15_base_repo, sd15_mtype,
200
- sd15_lora1, sd15_lora1s, sd15_lora2, sd15_lora2s, sd15_lora3, sd15_lora3s, sd15_lora4, sd15_lora4s, sd15_lora5, sd15_lora5s],
201
- outputs=[repo_urls, output_md],
202
- )
203
- sd15_presets.change(
204
- fn=sd15_set_presets,
205
- inputs=[sd15_presets],
206
- outputs=[sd15_dtype, sd15_vae, sd15_scheduler, sd15_lora1, sd15_lora1s, sd15_lora2, sd15_lora2s, sd15_lora3, sd15_lora3s,
207
- sd15_lora4, sd15_lora4s, sd15_lora5, sd15_lora5s, sd15_ema],
208
- queue=False,
209
- )
210
- gr.on(
211
- triggers=[flux_run_button.click],
212
- fn=convert_url_to_diffusers_repo,
213
- inputs=[dl_url, hf_user, hf_repo, hf_token, civitai_key, is_private, is_overwrite, is_upload_sf, repo_urls,
214
- flux_dtype, flux_vae, flux_clip, flux_t5, flux_scheduler, flux_ema, flux_base_repo, flux_mtype,
215
- flux_lora1, flux_lora1s, flux_lora2, flux_lora2s, flux_lora3, flux_lora3s, flux_lora4, flux_lora4s, flux_lora5, flux_lora5s],
216
- outputs=[repo_urls, output_md],
217
- )
218
- flux_presets.change(
219
- fn=flux_set_presets,
220
- inputs=[flux_presets],
221
- outputs=[flux_dtype, flux_vae, flux_scheduler, flux_lora1, flux_lora1s, flux_lora2, flux_lora2s, flux_lora3, flux_lora3s,
222
- flux_lora4, flux_lora4s, flux_lora5, flux_lora5s, flux_base_repo],
223
- queue=False,
224
- )
225
- gr.on(
226
- triggers=[sd35_run_button.click],
227
- fn=convert_url_to_diffusers_repo,
228
- inputs=[dl_url, hf_user, hf_repo, hf_token, civitai_key, is_private, is_overwrite, is_upload_sf, repo_urls,
229
- sd35_dtype, sd35_vae, sd35_clip, sd35_t5, sd35_scheduler, sd35_ema, sd35_base_repo, sd35_mtype,
230
- sd35_lora1, sd35_lora1s, sd35_lora2, sd35_lora2s, sd35_lora3, sd35_lora3s, sd35_lora4, sd35_lora4s, sd35_lora5, sd35_lora5s],
231
- outputs=[repo_urls, output_md],
232
- )
233
- sd35_presets.change(
234
- fn=sd35_set_presets,
235
- inputs=[sd35_presets],
236
- outputs=[sd35_dtype, sd35_vae, sd35_scheduler, sd35_lora1, sd35_lora1s, sd35_lora2, sd35_lora2s, sd35_lora3, sd35_lora3s,
237
- sd35_lora4, sd35_lora4s, sd35_lora5, sd35_lora5s, sd35_base_repo],
238
- queue=False,
239
- )
240
- clear_button.click(lambda: ([], "<br><br>"), None, [repo_urls, output_md], queue=False, show_api=False)
241
-
242
- demo.queue()
243
- demo.launch()
1
+ import gradio as gr
2
+ from convert_url_to_diffusers_multi_gr import convert_url_to_diffusers_repo, get_dtypes, FLUX_BASE_REPOS, SD35_BASE_REPOS
3
+ from presets import (DEFAULT_DTYPE, schedulers, clips, t5s, sdxl_vaes, sdxl_loras, sdxl_preset_dict, sdxl_set_presets,
4
+ sd15_vaes, sd15_loras, sd15_preset_dict, sd15_set_presets, flux_vaes, flux_loras, flux_preset_dict, flux_set_presets,
5
+ sd35_vaes, sd35_loras, sd35_preset_dict, sd35_set_presets)
6
+ import os
7
+
8
+
9
+ HF_USER = os.getenv("HF_USER", "")
10
+ HF_REPO = os.getenv("HF_REPO", "")
11
+ HF_URL = os.getenv("HF_URL", "")
12
+ HF_OW = os.getenv("HF_OW", False)
13
+ HF_PR = os.getenv("HF_PR", False)
14
+
15
+ css = """
16
+ .title { font-size: 3em; align-items: center; text-align: center; }
17
+ .info { align-items: center; text-align: center; }
18
+ .block.result { margin: 1em 0; padding: 1em; box-shadow: 0 0 3px 3px #664422, 0 0 3px 2px #664422 inset; border-radius: 6px; background: #665544; }
19
+ """
20
+
21
+ with gr.Blocks(theme="theNeofr/Syne", fill_width=True, css=css, delete_cache=(60, 3600)) as demo:
22
+ gr.Markdown("# Download SDXL / SD 1.5 / SD 3.5 / FLUX.1 safetensors and convert to HF🤗 Diffusers format and create your repo", elem_classes="title")
23
+ gr.Markdown(f"""
24
+ ### ⚠️IMPORTANT NOTICE⚠️<br>
25
+ It's dangerous to expose your access token or key to others.
26
+ If you do use it, I recommend that you duplicate this space on your own HF account in advance.
27
+ Keys and tokens could be set to **Secrets** (`HF_TOKEN`, `CIVITAI_API_KEY`) if it's placed in your own space.
28
+ It saves you the trouble of typing them in.<br>
29
+ It barely works in the CPU space, but larger files can be converted if duplicated on the more powerful **Zero GPU** space.
30
+ In particular, conversion of FLUX.1 or SD 3.5 is almost impossible in CPU space.
31
+ ### The steps are the following:
32
+ 1. Paste a write-access token from [hf.co/settings/tokens](https://huggingface.co/settings/tokens).
33
+ 1. Input a model download url of the Hugging Face or Civitai or other sites.
34
+ 1. If you want to download a model from Civitai, paste a Civitai API Key.
35
+ 1. Input your HF user ID. e.g. 'yourid'.
36
+ 1. Input your new repo name. If empty, auto-complete. e.g. 'newrepo'.
37
+ 1. Set the parameters. If not sure, just use the defaults.
38
+ 1. Click "Submit".
39
+ 1. Patiently wait until the output changes. It takes approximately 2 to 3 minutes (on SDXL models downloading from HF).
40
+ """)
41
+ with gr.Column():
42
+ dl_url = gr.Textbox(label="URL to download", placeholder="https://huggingface.co/bluepen5805/blue_pencil-XL/blob/main/blue_pencil-XL-v7.0.0.safetensors",
43
+ value=HF_URL, max_lines=1)
44
+ with gr.Group():
45
+ with gr.Row():
46
+ hf_user = gr.Textbox(label="Your HF user ID", placeholder="username", value=HF_USER, max_lines=1)
47
+ hf_repo = gr.Textbox(label="New repo name", placeholder="reponame", info="If empty, auto-complete", value=HF_REPO, max_lines=1)
48
+ with gr.Row(equal_height=True):
49
+ with gr.Column():
50
+ hf_token = gr.Textbox(label="Your HF write token", placeholder="hf_...", value="", max_lines=1)
51
+ gr.Markdown("Your token is available at [hf.co/settings/tokens](https://huggingface.co/settings/tokens).", elem_classes="info")
52
+ with gr.Column():
53
+ civitai_key = gr.Textbox(label="Your Civitai API Key (Optional)", info="If you download model from Civitai...", placeholder="", value="", max_lines=1)
54
+ gr.Markdown("Your Civitai API key is available at [https://civitai.com/user/account](https://civitai.com/user/account).", elem_classes="info")
55
+ with gr.Row():
56
+ is_upload_sf = gr.Checkbox(label="Upload single safetensors file into new repo", value=False)
57
+ is_private = gr.Checkbox(label="Create private repo", value=True)
58
+ gated = gr.Radio(label="Create gated repo", info="Gated repo must be public", choices=["auto", "manual", "False"], value="False")
59
+ with gr.Row():
60
+ is_overwrite = gr.Checkbox(label="Overwrite repo", value=HF_OW)
61
+ is_pr = gr.Checkbox(label="Create PR", value=HF_PR)
62
+ with gr.Tab("SDXL"):
63
+ with gr.Group():
64
+ sdxl_presets = gr.Radio(label="Presets", choices=list(sdxl_preset_dict.keys()), value=list(sdxl_preset_dict.keys())[0])
65
+ sdxl_mtype = gr.Textbox(value="SDXL", visible=False)
66
+ sdxl_dtype = gr.Radio(label="Output data type", choices=get_dtypes(), value=DEFAULT_DTYPE)
67
+ with gr.Accordion("Advanced settings", open=False):
68
+ with gr.Row():
69
+ sdxl_vae = gr.Dropdown(label="VAE", choices=sdxl_vaes, value="", allow_custom_value=True)
70
+ sdxl_scheduler = gr.Dropdown(label="Scheduler (Sampler)", choices=schedulers, value="Euler a")
71
+ sdxl_clip = gr.Dropdown(label="CLIP", choices=clips, value="", allow_custom_value=True)
72
+ with gr.Column():
73
+ with gr.Row():
74
+ sdxl_lora1 = gr.Dropdown(label="LoRA1", choices=sdxl_loras, value="", allow_custom_value=True, min_width=320, scale=2)
75
+ sdxl_lora1s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA1 weight scale")
76
+ with gr.Row():
77
+ sdxl_lora2 = gr.Dropdown(label="LoRA2", choices=sdxl_loras, value="", allow_custom_value=True, min_width=320, scale=2)
78
+ sdxl_lora2s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA2 weight scale")
79
+ with gr.Row():
80
+ sdxl_lora3 = gr.Dropdown(label="LoRA3", choices=sdxl_loras, value="", allow_custom_value=True, min_width=320, scale=2)
81
+ sdxl_lora3s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA3 weight scale")
82
+ with gr.Row():
83
+ sdxl_lora4 = gr.Dropdown(label="LoRA4", choices=sdxl_loras, value="", allow_custom_value=True, min_width=320, scale=2)
84
+ sdxl_lora4s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA4 weight scale")
85
+ with gr.Row():
86
+ sdxl_lora5 = gr.Dropdown(label="LoRA5", choices=sdxl_loras, value="", allow_custom_value=True, min_width=320, scale=2)
87
+ sdxl_lora5s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA5 weight scale")
88
+ sdxl_run_button = gr.Button(value="Submit", variant="primary")
89
+ with gr.Tab("SD 1.5"):
90
+ with gr.Group():
91
+ sd15_presets = gr.Radio(label="Presets", choices=list(sd15_preset_dict.keys()), value=list(sd15_preset_dict.keys())[0])
92
+ sd15_mtype = gr.Textbox(value="SD 1.5", visible=False)
93
+ sd15_dtype = gr.Radio(label="Output data type", choices=get_dtypes(), value=DEFAULT_DTYPE)
94
+ with gr.Row():
95
+ sd15_ema = gr.Checkbox(label="Extract EMA", value=True, visible=True)
96
+ sd15_isize = gr.Radio(label="Image size", choices=["768", "512"], value="768")
97
+ sd15_sc = gr.Checkbox(label="Safety checker", value=False)
98
+ with gr.Accordion("Advanced settings", open=False):
99
+ with gr.Row():
100
+ sd15_vae = gr.Dropdown(label="VAE", choices=sd15_vaes, value="", allow_custom_value=True)
101
+ sd15_scheduler = gr.Dropdown(label="Scheduler (Sampler)", choices=schedulers, value="Euler")
102
+ sd15_clip = gr.Dropdown(label="CLIP", choices=clips, value="", allow_custom_value=True)
103
+ with gr.Column():
104
+ with gr.Row():
105
+ sd15_lora1 = gr.Dropdown(label="LoRA1", choices=sd15_loras, value="", allow_custom_value=True, min_width=320, scale=2)
106
+ sd15_lora1s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA1 weight scale")
107
+ with gr.Row():
108
+ sd15_lora2 = gr.Dropdown(label="LoRA2", choices=sd15_loras, value="", allow_custom_value=True, min_width=320, scale=2)
109
+ sd15_lora2s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA2 weight scale")
110
+ with gr.Row():
111
+ sd15_lora3 = gr.Dropdown(label="LoRA3", choices=sd15_loras, value="", allow_custom_value=True, min_width=320, scale=2)
112
+ sd15_lora3s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA3 weight scale")
113
+ with gr.Row():
114
+ sd15_lora4 = gr.Dropdown(label="LoRA4", choices=sd15_loras, value="", allow_custom_value=True, min_width=320, scale=2)
115
+ sd15_lora4s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA4 weight scale")
116
+ with gr.Row():
117
+ sd15_lora5 = gr.Dropdown(label="LoRA5", choices=sd15_loras, value="", allow_custom_value=True, min_width=320, scale=2)
118
+ sd15_lora5s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA5 weight scale")
119
+ sd15_run_button = gr.Button(value="Submit", variant="primary")
120
+ with gr.Tab("FLUX.1"):
121
+ with gr.Group():
122
+ flux_presets = gr.Radio(label="Presets", choices=list(flux_preset_dict.keys()), value=list(flux_preset_dict.keys())[0])
123
+ flux_mtype = gr.Textbox(value="FLUX", visible=False)
124
+ flux_dtype = gr.Radio(label="Output data type", choices=get_dtypes(), value="bf16")
125
+ flux_base_repo = gr.Dropdown(label="Base repo ID", choices=FLUX_BASE_REPOS, value=FLUX_BASE_REPOS[0], allow_custom_value=True, visible=True)
126
+ with gr.Accordion("Advanced settings", open=False):
127
+ with gr.Row():
128
+ flux_vae = gr.Dropdown(label="VAE", choices=flux_vaes, value="", allow_custom_value=True)
129
+ flux_scheduler = gr.Dropdown(label="Scheduler (Sampler)", choices=[""], value="", visible=False)
130
+ with gr.Row():
131
+ flux_clip = gr.Dropdown(label="CLIP", choices=clips, value="", allow_custom_value=True)
132
+ flux_t5 = gr.Dropdown(label="T5", choices=t5s, value="", allow_custom_value=True)
133
+ with gr.Column():
134
+ with gr.Row():
135
+ flux_lora1 = gr.Dropdown(label="LoRA1", choices=flux_loras, value="", allow_custom_value=True, min_width=320, scale=2)
136
+ flux_lora1s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA1 weight scale")
137
+ with gr.Row():
138
+ flux_lora2 = gr.Dropdown(label="LoRA2", choices=flux_loras, value="", allow_custom_value=True, min_width=320, scale=2)
139
+ flux_lora2s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA2 weight scale")
140
+ with gr.Row():
141
+ flux_lora3 = gr.Dropdown(label="LoRA3", choices=flux_loras, value="", allow_custom_value=True, min_width=320, scale=2)
142
+ flux_lora3s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA3 weight scale")
143
+ with gr.Row():
144
+ flux_lora4 = gr.Dropdown(label="LoRA4", choices=flux_loras, value="", allow_custom_value=True, min_width=320, scale=2)
145
+ flux_lora4s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA4 weight scale")
146
+ with gr.Row():
147
+ flux_lora5 = gr.Dropdown(label="LoRA5", choices=flux_loras, value="", allow_custom_value=True, min_width=320, scale=2)
148
+ flux_lora5s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA5 weight scale")
149
+ flux_run_button = gr.Button(value="Submit", variant="primary")
150
+ with gr.Tab("SD 3.5"):
151
+ with gr.Group():
152
+ sd35_presets = gr.Radio(label="Presets", choices=list(sd35_preset_dict.keys()), value=list(sd35_preset_dict.keys())[0])
153
+ sd35_mtype = gr.Textbox(value="SD 3.5", visible=False)
154
+ sd35_dtype = gr.Radio(label="Output data type", choices=get_dtypes(), value="bf16")
155
+ sd35_base_repo = gr.Dropdown(label="Base repo ID", choices=SD35_BASE_REPOS, value=SD35_BASE_REPOS[0], allow_custom_value=True, visible=True)
156
+ with gr.Accordion("Advanced settings", open=False):
157
+ with gr.Row():
158
+ sd35_vae = gr.Dropdown(label="VAE", choices=sd35_vaes, value="", allow_custom_value=True)
159
+ sd35_scheduler = gr.Dropdown(label="Scheduler (Sampler)", choices=[""], value="", visible=False)
160
+ with gr.Row():
161
+ sd35_clip = gr.Dropdown(label="CLIP", choices=clips, value="", allow_custom_value=True)
162
+ sd35_t5 = gr.Dropdown(label="T5", choices=t5s, value="", allow_custom_value=True)
163
+ with gr.Column():
164
+ with gr.Row():
165
+ sd35_lora1 = gr.Dropdown(label="LoRA1", choices=sd35_loras, value="", allow_custom_value=True, min_width=320, scale=2)
166
+ sd35_lora1s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA1 weight scale")
167
+ with gr.Row():
168
+ sd35_lora2 = gr.Dropdown(label="LoRA2", choices=sd35_loras, value="", allow_custom_value=True, min_width=320, scale=2)
169
+ sd35_lora2s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA2 weight scale")
170
+ with gr.Row():
171
+ sd35_lora3 = gr.Dropdown(label="LoRA3", choices=sd35_loras, value="", allow_custom_value=True, min_width=320, scale=2)
172
+ sd35_lora3s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA3 weight scale")
173
+ with gr.Row():
174
+ sd35_lora4 = gr.Dropdown(label="LoRA4", choices=sd35_loras, value="", allow_custom_value=True, min_width=320, scale=2)
175
+ sd35_lora4s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA4 weight scale")
176
+ with gr.Row():
177
+ sd35_lora5 = gr.Dropdown(label="LoRA5", choices=sd35_loras, value="", allow_custom_value=True, min_width=320, scale=2)
178
+ sd35_lora5s = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA5 weight scale")
179
+ sd35_run_button = gr.Button(value="Submit", variant="primary")
180
+ adv_args = gr.Textbox(label="Advanced arguments", value="", visible=False)
181
+ with gr.Group():
182
+ repo_urls = gr.CheckboxGroup(visible=False, choices=[], value=[])
183
+ output_md = gr.Markdown(label="Output", value="<br><br>", elem_classes="result")
184
+ clear_button = gr.Button(value="Clear Output", variant="secondary")
185
+ gr.DuplicateButton(value="Duplicate Space")
186
+
187
+ gr.Markdown("This webui was redesigned with ❤ by [theNeofr](https://huggingface.co/theNeofr)")
188
+ gr.on(
189
+ triggers=[sdxl_run_button.click],
190
+ fn=convert_url_to_diffusers_repo,
191
+ inputs=[dl_url, hf_user, hf_repo, hf_token, civitai_key, is_private, gated, is_overwrite, is_pr, is_upload_sf, repo_urls,
192
+ sdxl_dtype, sdxl_vae, sdxl_clip, flux_t5, sdxl_scheduler, sd15_ema, sd15_isize, sd15_sc, flux_base_repo, sdxl_mtype,
193
+ sdxl_lora1, sdxl_lora1s, sdxl_lora2, sdxl_lora2s, sdxl_lora3, sdxl_lora3s, sdxl_lora4, sdxl_lora4s, sdxl_lora5, sdxl_lora5s, adv_args],
194
+ outputs=[repo_urls, output_md],
195
+ )
196
+ sdxl_presets.change(
197
+ fn=sdxl_set_presets,
198
+ inputs=[sdxl_presets],
199
+ outputs=[sdxl_dtype, sdxl_vae, sdxl_scheduler, sdxl_lora1, sdxl_lora1s, sdxl_lora2, sdxl_lora2s, sdxl_lora3, sdxl_lora3s,
200
+ sdxl_lora4, sdxl_lora4s, sdxl_lora5, sdxl_lora5s],
201
+ queue=False,
202
+ )
203
+ gr.on(
204
+ triggers=[sd15_run_button.click],
205
+ fn=convert_url_to_diffusers_repo,
206
+ inputs=[dl_url, hf_user, hf_repo, hf_token, civitai_key, is_private, gated, is_overwrite, is_pr, is_upload_sf, repo_urls,
207
+ sd15_dtype, sd15_vae, sd15_clip, flux_t5, sd15_scheduler, sd15_ema, sd15_isize, sd15_sc, flux_base_repo, sd15_mtype,
208
+ sd15_lora1, sd15_lora1s, sd15_lora2, sd15_lora2s, sd15_lora3, sd15_lora3s, sd15_lora4, sd15_lora4s, sd15_lora5, sd15_lora5s, adv_args],
209
+ outputs=[repo_urls, output_md],
210
+ )
211
+ sd15_presets.change(
212
+ fn=sd15_set_presets,
213
+ inputs=[sd15_presets],
214
+ outputs=[sd15_dtype, sd15_vae, sd15_scheduler, sd15_lora1, sd15_lora1s, sd15_lora2, sd15_lora2s, sd15_lora3, sd15_lora3s,
215
+ sd15_lora4, sd15_lora4s, sd15_lora5, sd15_lora5s, sd15_ema],
216
+ queue=False,
217
+ )
218
+ gr.on(
219
+ triggers=[flux_run_button.click],
220
+ fn=convert_url_to_diffusers_repo,
221
+ inputs=[dl_url, hf_user, hf_repo, hf_token, civitai_key, is_private, gated, is_overwrite, is_pr, is_upload_sf, repo_urls,
222
+ flux_dtype, flux_vae, flux_clip, flux_t5, flux_scheduler, sd15_ema, sd15_isize, sd15_sc, flux_base_repo, flux_mtype,
223
+ flux_lora1, flux_lora1s, flux_lora2, flux_lora2s, flux_lora3, flux_lora3s, flux_lora4, flux_lora4s, flux_lora5, flux_lora5s, adv_args],
224
+ outputs=[repo_urls, output_md],
225
+ )
226
+ flux_presets.change(
227
+ fn=flux_set_presets,
228
+ inputs=[flux_presets],
229
+ outputs=[flux_dtype, flux_vae, flux_scheduler, flux_lora1, flux_lora1s, flux_lora2, flux_lora2s, flux_lora3, flux_lora3s,
230
+ flux_lora4, flux_lora4s, flux_lora5, flux_lora5s, flux_base_repo],
231
+ queue=False,
232
+ )
233
+ gr.on(
234
+ triggers=[sd35_run_button.click],
235
+ fn=convert_url_to_diffusers_repo,
236
+ inputs=[dl_url, hf_user, hf_repo, hf_token, civitai_key, is_private, gated, is_overwrite, is_pr, is_upload_sf, repo_urls,
237
+ sd35_dtype, sd35_vae, sd35_clip, sd35_t5, sd35_scheduler, sd15_ema, sd15_isize, sd15_sc, sd35_base_repo, sd35_mtype,
238
+ sd35_lora1, sd35_lora1s, sd35_lora2, sd35_lora2s, sd35_lora3, sd35_lora3s, sd35_lora4, sd35_lora4s, sd35_lora5, sd35_lora5s, adv_args],
239
+ outputs=[repo_urls, output_md],
240
+ )
241
+ sd35_presets.change(
242
+ fn=sd35_set_presets,
243
+ inputs=[sd35_presets],
244
+ outputs=[sd35_dtype, sd35_vae, sd35_scheduler, sd35_lora1, sd35_lora1s, sd35_lora2, sd35_lora2s, sd35_lora3, sd35_lora3s,
245
+ sd35_lora4, sd35_lora4s, sd35_lora5, sd35_lora5s, sd35_base_repo],
246
+ queue=False,
247
+ )
248
+ clear_button.click(lambda: ([], "<br><br>"), None, [repo_urls, output_md], queue=False, show_api=False)
249
+
250
+ demo.queue()
251
+ demo.launch(ssr_mode=False)
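
The updated app.py reads its default form values from environment variables (`HF_USER`, `HF_REPO`, `HF_URL`, `HF_OW`, `HF_PR`) instead of pulling them from `os.environ` inside `convert_url_to_diffusers_repo` as before. Below is a minimal sketch of pre-seeding those defaults when running the app locally; the variable names come from the diff above, while the example values and the launch-by-subprocess approach are illustrative assumptions:

```python
import os
import subprocess

# Illustrative defaults; in a duplicated Space these would normally be set as
# Space Variables/Secrets rather than exported in code.
os.environ["HF_USER"] = "your-username"       # pre-fills "Your HF user ID"
os.environ["HF_REPO"] = "my-converted-model"  # pre-fills "New repo name"
os.environ["HF_URL"] = "https://huggingface.co/bluepen5805/blue_pencil-XL/blob/main/blue_pencil-XL-v7.0.0.safetensors"  # pre-fills "URL to download"
# HF_OW and HF_PR are read with a default of False; leave them unset to keep
# "Overwrite repo" and "Create PR" unchecked by default.

# The child process inherits os.environ, so app.py sees the values above.
subprocess.run(["python", "app.py"], check=True)
```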
convert_url_to_diffusers_multi_gr.py CHANGED
@@ -4,19 +4,20 @@ import argparse
4
  from pathlib import Path
5
  import os
6
  import torch
7
- from diffusers import (DiffusionPipeline, AutoencoderKL, FlowMatchEulerDiscreteScheduler, StableDiffusionXLPipeline, StableDiffusionPipeline,
8
- FluxPipeline, FluxTransformer2DModel, SD3Transformer2DModel, StableDiffusion3Pipeline)
9
- from transformers import CLIPTokenizer, CLIPTextModel, CLIPTextModelWithProjection, AutoTokenizer, T5EncoderModel, BitsAndBytesConfig as TFBitsAndBytesConfig
10
  from huggingface_hub import save_torch_state_dict, snapshot_download
11
  from diffusers.loaders.single_file_utils import (convert_flux_transformer_checkpoint_to_diffusers, convert_sd3_transformer_checkpoint_to_diffusers,
12
  convert_sd3_t5_checkpoint_to_diffusers)
 
13
  import safetensors.torch
14
  import gradio as gr
15
  import shutil
16
  import gc
17
  import tempfile
18
  # also requires aria, gdown, peft, huggingface_hub, safetensors, transformers, accelerate, pytorch_lightning
19
- from utils import (get_token, set_token, is_repo_exists, is_repo_name, get_download_file, upload_repo)
20
  from sdutils import (SCHEDULER_CONFIG_MAP, get_scheduler_config, fuse_loras, DTYPE_DEFAULT, get_dtype, get_dtypes, get_model_type_from_key, get_process_dtype)
21
 
22
 
@@ -204,7 +205,7 @@ def convert_sd35_fp8_cpu(new_file: str, new_dir: str, dtype: str, base_repo: str
204
 
205
  #@spaces.GPU(duration=60)
206
  def load_and_save_pipeline(pipe, model_type: str, url: str, new_file: str, new_dir: str, dtype: str,
207
- scheduler: str, ema: bool, base_repo: str, civitai_key: str, lora_dict: dict,
208
  my_vae, my_clip_tokenizer, my_clip_encoder, my_t5_tokenizer, my_t5_encoder,
209
  kwargs: dict, dkwargs: dict, progress=gr.Progress(track_tqdm=True)):
210
  try:
@@ -223,6 +224,9 @@ def load_and_save_pipeline(pipe, model_type: str, url: str, new_file: str, new_d
223
  if dtype == "NF4" and nf4_config is not None and nf4_config_tf is not None:
224
  qkwargs["quantization_config"] = nf4_config
225
  tfqkwargs["quantization_config"] = nf4_config_tf
226
 
227
  #t5 = None
228
 
@@ -234,12 +238,19 @@ def load_and_save_pipeline(pipe, model_type: str, url: str, new_file: str, new_d
234
  pipe.scheduler = sconf[0].from_config(pipe.scheduler.config, **sconf[1])
235
  pipe.save_pretrained(new_dir)
236
  elif model_type == "SD 1.5":
237
- if is_repo_name(url): pipe = StableDiffusionPipeline.from_pretrained(url, extract_ema=ema, requires_safety_checker=False,
238
- use_safetensors=True, **kwargs, **dkwargs, token=hf_token)
239
- else: pipe = StableDiffusionPipeline.from_single_file(new_file, extract_ema=ema, requires_safety_checker=False, use_safetensors=True, **kwargs, **dkwargs)
240
  pipe = fuse_loras(pipe, lora_dict, temp_dir, civitai_key, dkwargs)
241
  sconf = get_scheduler_config(scheduler)
242
  pipe.scheduler = sconf[0].from_config(pipe.scheduler.config, **sconf[1])
 
243
  pipe.save_pretrained(new_dir)
244
  elif model_type == "FLUX":
245
  if dtype != "fp8":
@@ -282,31 +293,34 @@ def load_and_save_pipeline(pipe, model_type: str, url: str, new_file: str, new_d
282
  pipe.save_pretrained(new_dir)
283
  except Exception as e:
284
  print(f"Failed to load pipeline. {e}")
285
- raise Exception("Failed to load pipeline.") from e
286
  finally:
287
  return pipe
288
 
289
 
290
  def convert_url_to_diffusers(url: str, civitai_key: str="", is_upload_sf: bool=False, dtype: str="fp16", vae: str="", clip: str="", t5: str="",
291
- scheduler: str="Euler a", ema: bool=True, base_repo: str="", mtype: str="", lora_dict: dict={}, is_local: bool=True, progress=gr.Progress(track_tqdm=True)):
 
292
  try:
293
  hf_token = get_token()
294
  progress(0, desc="Start converting...")
295
  temp_dir = TEMP_DIR
 
296
 
297
  if is_repo_name(url) and is_repo_exists(url):
298
  new_file = url
299
  model_type = mtype
300
  else:
301
  new_file = get_download_file(temp_dir, url, civitai_key)
302
- if not new_file: raise Exception(f"Safetensors file not found: {url}")
 
 
303
  model_type = get_model_type_from_key(new_file)
304
  new_dir = Path(new_file).stem.replace(" ", "_").replace(",", "_").replace(".", "_") #
305
 
306
  kwargs = {}
307
  dkwargs = {}
308
  if dtype != DTYPE_DEFAULT: dkwargs["torch_dtype"] = get_process_dtype(dtype, model_type)
309
- pipe = None
310
 
311
  print(f"Model type: {model_type} / VAE: {vae} / CLIP: {clip} / T5: {t5} / Scheduler: {scheduler} / dtype: {dtype} / EMA: {ema} / Base repo: {base_repo} / LoRAs: {lora_dict}")
312
 
@@ -362,7 +376,7 @@ def convert_url_to_diffusers(url: str, civitai_key: str="", is_upload_sf: bool=F
362
  if my_t5_tokenizer: kwargs["tokenizer_2"] = my_t5_tokenizer
363
  if my_t5_encoder: kwargs["text_encoder_2"] = my_t5_encoder
364
 
365
- pipe = load_and_save_pipeline(pipe, model_type, url, new_file, new_dir, dtype, scheduler, ema, base_repo, civitai_key, lora_dict,
366
  my_vae, my_clip_tokenizer, my_clip_encoder, my_t5_tokenizer, my_t5_encoder, kwargs, dkwargs)
367
 
368
  if Path(new_dir).exists(): save_readme_md(new_dir, url)
@@ -375,35 +389,35 @@ def convert_url_to_diffusers(url: str, civitai_key: str="", is_upload_sf: bool=F
375
  return new_dir
376
  except Exception as e:
377
  print(f"Failed to convert. {e}")
378
- raise Exception("Failed to convert.") from e
379
  finally:
380
  del pipe
381
  torch.cuda.empty_cache()
382
  gc.collect()
383
 
384
 
385
- def convert_url_to_diffusers_repo(dl_url: str, hf_user: str, hf_repo: str, hf_token: str, civitai_key="", is_private: bool=True, is_overwrite: bool=False,
386
- is_upload_sf: bool=False, urls: list=[], dtype: str="fp16", vae: str="", clip: str="", t5: str="", scheduler: str="Euler a", ema: bool=True,
 
 
387
  base_repo: str="", mtype: str="", lora1: str="", lora1s=1.0, lora2: str="", lora2s=1.0, lora3: str="", lora3s=1.0,
388
- lora4: str="", lora4s=1.0, lora5: str="", lora5s=1.0, progress=gr.Progress(track_tqdm=True)):
389
  try:
390
  is_local = False
391
  if not civitai_key and os.environ.get("CIVITAI_API_KEY"): civitai_key = os.environ.get("CIVITAI_API_KEY") # default Civitai API key
392
  if not hf_token and os.environ.get("HF_TOKEN"): hf_token = os.environ.get("HF_TOKEN") # default HF write token
393
- if not hf_user and os.environ.get("HF_USER"): hf_user = os.environ.get("HF_USER") # default username
394
  if not hf_user: raise gr.Error(f"Invalid user name: {hf_user}")
395
- if not hf_repo and os.environ.get("HF_REPO"): hf_repo = os.environ.get("HF_REPO") # default reponame
396
- if not is_overwrite and os.environ.get("HF_OW"): is_overwrite = os.environ.get("HF_OW") # for debugging
397
- if not dl_url and os.environ.get("HF_URL"): dl_url = os.environ.get("HF_URL") # for debugging
398
  set_token(hf_token)
399
  lora_dict = {lora1: lora1s, lora2: lora2s, lora3: lora3s, lora4: lora4s, lora5: lora5s}
400
- new_path = convert_url_to_diffusers(dl_url, civitai_key, is_upload_sf, dtype, vae, clip, t5, scheduler, ema, base_repo, mtype, lora_dict, is_local)
401
  if not new_path: return ""
402
  new_repo_id = f"{hf_user}/{Path(new_path).stem}"
403
  if hf_repo != "": new_repo_id = f"{hf_user}/{hf_repo}"
404
  if not is_repo_name(new_repo_id): raise gr.Error(f"Invalid repo name: {new_repo_id}")
405
- if not is_overwrite and is_repo_exists(new_repo_id): raise gr.Error(f"Repo already exists: {new_repo_id}")
406
- repo_url = upload_repo(new_repo_id, new_path, is_private)
 
407
  safe_clean(new_path)
408
  if not urls: urls = []
409
  urls.append(repo_url)
 
4
  from pathlib import Path
5
  import os
6
  import torch
7
+ from diffusers import (DiffusionPipeline, AutoencoderKL, FlowMatchEulerDiscreteScheduler, StableDiffusionXLPipeline, StableDiffusionPipeline,
8
+ FluxPipeline, FluxTransformer2DModel, SD3Transformer2DModel, StableDiffusion3Pipeline)
9
+ from transformers import CLIPTokenizer, CLIPTextModel, CLIPTextModelWithProjection, CLIPFeatureExtractor, AutoTokenizer, T5EncoderModel, BitsAndBytesConfig as TFBitsAndBytesConfig
10
  from huggingface_hub import save_torch_state_dict, snapshot_download
11
  from diffusers.loaders.single_file_utils import (convert_flux_transformer_checkpoint_to_diffusers, convert_sd3_transformer_checkpoint_to_diffusers,
12
  convert_sd3_t5_checkpoint_to_diffusers)
13
+ from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
14
  import safetensors.torch
15
  import gradio as gr
16
  import shutil
17
  import gc
18
  import tempfile
19
  # also requires aria, gdown, peft, huggingface_hub, safetensors, transformers, accelerate, pytorch_lightning
20
+ from utils import (get_token, set_token, is_repo_exists, is_repo_name, get_download_file, upload_repo, gate_repo)
21
  from sdutils import (SCHEDULER_CONFIG_MAP, get_scheduler_config, fuse_loras, DTYPE_DEFAULT, get_dtype, get_dtypes, get_model_type_from_key, get_process_dtype)
22
 
23
 
 
205
 
206
  #@spaces.GPU(duration=60)
207
  def load_and_save_pipeline(pipe, model_type: str, url: str, new_file: str, new_dir: str, dtype: str,
208
+ scheduler: str, ema: bool, image_size: str, is_safety_checker: bool, base_repo: str, civitai_key: str, lora_dict: dict,
209
  my_vae, my_clip_tokenizer, my_clip_encoder, my_t5_tokenizer, my_t5_encoder,
210
  kwargs: dict, dkwargs: dict, progress=gr.Progress(track_tqdm=True)):
211
  try:
 
224
  if dtype == "NF4" and nf4_config is not None and nf4_config_tf is not None:
225
  qkwargs["quantization_config"] = nf4_config
226
  tfqkwargs["quantization_config"] = nf4_config_tf
227
+
228
+ #print(f"model_type:{model_type}, dtype:{dtype}, scheduler:{scheduler}, ema:{ema}, base_repo:{base_repo}")
229
+ #print("lora_dict:", lora_dict, "kwargs:", kwargs, "dkwargs:", dkwargs)
230
 
231
  #t5 = None
232
 
 
238
  pipe.scheduler = sconf[0].from_config(pipe.scheduler.config, **sconf[1])
239
  pipe.save_pretrained(new_dir)
240
  elif model_type == "SD 1.5":
241
+ if is_safety_checker:
242
+ safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")
243
+ feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")
244
+ kwargs["requires_safety_checker"] = True
245
+ kwargs["safety_checker"] = safety_checker
246
+ kwargs["feature_extractor"] = feature_extractor
247
+ else: kwargs["requires_safety_checker"] = False
248
+ if is_repo_name(url): pipe = StableDiffusionPipeline.from_pretrained(url, extract_ema=ema, use_safetensors=True, **kwargs, **dkwargs, token=hf_token)
249
+ else: pipe = StableDiffusionPipeline.from_single_file(new_file, extract_ema=ema, use_safetensors=True, **kwargs, **dkwargs)
250
  pipe = fuse_loras(pipe, lora_dict, temp_dir, civitai_key, dkwargs)
251
  sconf = get_scheduler_config(scheduler)
252
  pipe.scheduler = sconf[0].from_config(pipe.scheduler.config, **sconf[1])
253
+ if image_size != "512": pipe.vae = AutoencoderKL.from_config(pipe.vae.config, sample_size=int(image_size))
254
  pipe.save_pretrained(new_dir)
255
  elif model_type == "FLUX":
256
  if dtype != "fp8":
 
293
  pipe.save_pretrained(new_dir)
294
  except Exception as e:
295
  print(f"Failed to load pipeline. {e}")
296
+ raise Exception(f"Failed to load pipeline. {e}") from e
297
  finally:
298
  return pipe
299
 
300
 
301
  def convert_url_to_diffusers(url: str, civitai_key: str="", is_upload_sf: bool=False, dtype: str="fp16", vae: str="", clip: str="", t5: str="",
302
+ scheduler: str="Euler a", ema: bool=True, image_size: str="768", safety_checker: bool=False,
303
+ base_repo: str="", mtype: str="", lora_dict: dict={}, is_local: bool=True, progress=gr.Progress(track_tqdm=True)):
304
  try:
305
  hf_token = get_token()
306
  progress(0, desc="Start converting...")
307
  temp_dir = TEMP_DIR
308
+ pipe = None
309
 
310
  if is_repo_name(url) and is_repo_exists(url):
311
  new_file = url
312
  model_type = mtype
313
  else:
314
  new_file = get_download_file(temp_dir, url, civitai_key)
315
+ if not new_file or Path(new_file).suffix.lower() not in set([".safetensors", ".ckpt", ".bin", ".sft"]):
316
+ safe_clean(new_file)
317
+ raise Exception(f"Safetensors file not found: {url}")
318
  model_type = get_model_type_from_key(new_file)
319
  new_dir = Path(new_file).stem.replace(" ", "_").replace(",", "_").replace(".", "_") #
320
 
321
  kwargs = {}
322
  dkwargs = {}
323
  if dtype != DTYPE_DEFAULT: dkwargs["torch_dtype"] = get_process_dtype(dtype, model_type)
 
324
 
325
  print(f"Model type: {model_type} / VAE: {vae} / CLIP: {clip} / T5: {t5} / Scheduler: {scheduler} / dtype: {dtype} / EMA: {ema} / Base repo: {base_repo} / LoRAs: {lora_dict}")
326
 
 
376
  if my_t5_tokenizer: kwargs["tokenizer_2"] = my_t5_tokenizer
377
  if my_t5_encoder: kwargs["text_encoder_2"] = my_t5_encoder
378
 
379
+ pipe = load_and_save_pipeline(pipe, model_type, url, new_file, new_dir, dtype, scheduler, ema, image_size, safety_checker, base_repo, civitai_key, lora_dict,
380
  my_vae, my_clip_tokenizer, my_clip_encoder, my_t5_tokenizer, my_t5_encoder, kwargs, dkwargs)
381
 
382
  if Path(new_dir).exists(): save_readme_md(new_dir, url)
 
389
  return new_dir
390
  except Exception as e:
391
  print(f"Failed to convert. {e}")
392
+ raise Exception(f"Failed to convert. {e}") from e
393
  finally:
394
  del pipe
395
  torch.cuda.empty_cache()
396
  gc.collect()
397
 
398
 
399
+ def convert_url_to_diffusers_repo(dl_url: str, hf_user: str, hf_repo: str, hf_token: str, civitai_key="", is_private: bool=True,
400
+ gated: str="False", is_overwrite: bool=False, is_pr: bool=False,
401
+ is_upload_sf: bool=False, urls: list=[], dtype: str="fp16", vae: str="", clip: str="", t5: str="", scheduler: str="Euler a",
402
+ ema: bool=True, image_size: str="768", safety_checker: bool=False,
403
  base_repo: str="", mtype: str="", lora1: str="", lora1s=1.0, lora2: str="", lora2s=1.0, lora3: str="", lora3s=1.0,
404
+ lora4: str="", lora4s=1.0, lora5: str="", lora5s=1.0, args: str="", progress=gr.Progress(track_tqdm=True)):
405
  try:
406
  is_local = False
407
  if not civitai_key and os.environ.get("CIVITAI_API_KEY"): civitai_key = os.environ.get("CIVITAI_API_KEY") # default Civitai API key
408
  if not hf_token and os.environ.get("HF_TOKEN"): hf_token = os.environ.get("HF_TOKEN") # default HF write token
 
409
  if not hf_user: raise gr.Error(f"Invalid user name: {hf_user}")
410
+ if gated != "False" and is_private: raise gr.Error(f"Gated repo must be public")
 
 
411
  set_token(hf_token)
412
  lora_dict = {lora1: lora1s, lora2: lora2s, lora3: lora3s, lora4: lora4s, lora5: lora5s}
413
+ new_path = convert_url_to_diffusers(dl_url, civitai_key, is_upload_sf, dtype, vae, clip, t5, scheduler, ema, image_size, safety_checker, base_repo, mtype, lora_dict, is_local)
414
  if not new_path: return ""
415
  new_repo_id = f"{hf_user}/{Path(new_path).stem}"
416
  if hf_repo != "": new_repo_id = f"{hf_user}/{hf_repo}"
417
  if not is_repo_name(new_repo_id): raise gr.Error(f"Invalid repo name: {new_repo_id}")
418
+ if not is_overwrite and is_repo_exists(new_repo_id) and not is_pr: raise gr.Error(f"Repo already exists: {new_repo_id}")
419
+ repo_url = upload_repo(new_repo_id, new_path, is_private, is_pr)
420
+ gate_repo(new_repo_id, gated)
421
  safe_clean(new_path)
422
  if not urls: urls = []
423
  urls.append(repo_url)
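
The updated conversion code imports `gate_repo` from `utils` and calls `gate_repo(new_repo_id, gated)` after `upload_repo`, but the matching utils.py change is not shown in this view. A plausible sketch of such a helper built on `huggingface_hub` follows; the function body is an assumption for illustration, not the actual utils.py implementation:

```python
from typing import Optional
from huggingface_hub import HfApi

def gate_repo(repo_id: str, gated: str, token: Optional[str] = None) -> None:
    """Hypothetical helper: apply the gating mode selected in the UI
    ("auto", "manual", or "False") to an already-created model repo."""
    if gated == "False":
        return  # repo stays ungated
    # huggingface_hub's update_repo_settings accepts gated="auto" | "manual" | False
    HfApi(token=token).update_repo_settings(repo_id=repo_id, gated=gated)
```

Since the UI passes `gated` as the string "False" when gating is disabled (see the `gr.Radio` choices in app.py above), the helper treats that string, not a boolean, as the no-op case.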
presets.py CHANGED
@@ -1,134 +1,147 @@
1
- from sdutils import get_dtypes, SCHEDULER_CONFIG_MAP
2
-
3
-
4
- DEFAULT_DTYPE = get_dtypes()[0]
5
- schedulers = list(SCHEDULER_CONFIG_MAP.keys())
6
-
7
-
8
- clips = [
9
- "",
10
- "openai/clip-vit-large-patch14",
11
- ]
12
-
13
-
14
- t5s = [
15
- "",
16
- "https://huggingface.co/camenduru/FLUX.1-dev/blob/main/t5xxl_fp8_e4m3fn.safetensors",
17
- ]
18
-
19
-
20
- sdxl_vaes = [
21
- "",
22
- "https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/blob/main/sdxl.vae.safetensors",
23
- "https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/blob/main/sdxl_vae-fp16fix-blessed.safetensors",
24
- "https://huggingface.co/John6666/safetensors_converting_test/blob/main/xlVAEC_e7.safetensors",
25
- "https://huggingface.co/John6666/safetensors_converting_test/blob/main/xlVAEC_f1.safetensors",
26
- ]
27
-
28
-
29
- sdxl_loras = [
30
- "",
31
- "https://huggingface.co/SPO-Diffusion-Models/SPO-SDXL_4k-p_10ep_LoRA/blob/main/spo_sdxl_10ep_4k-data_lora_diffusers.safetensors",
32
- "https://huggingface.co/wangfuyun/PCM_Weights/blob/main/sdxl/pcm_sdxl_smallcfg_2step_converted.safetensors",
33
- "https://huggingface.co/wangfuyun/PCM_Weights/blob/main/sdxl/pcm_sdxl_smallcfg_4step_converted.safetensors",
34
- "https://huggingface.co/wangfuyun/PCM_Weights/blob/main/sdxl/pcm_sdxl_smallcfg_8step_converted.safetensors",
35
- "https://huggingface.co/wangfuyun/PCM_Weights/blob/main/sdxl/pcm_sdxl_normalcfg_8step_converted.safetensors",
36
- "https://huggingface.co/wangfuyun/PCM_Weights/blob/main/sdxl/pcm_sdxl_normalcfg_16step_converted.safetensors",
37
- "https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-1step-lora.safetensors",
38
- "https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-2steps-lora.safetensors",
39
- "https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-4steps-lora.safetensors",
40
- "https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-8steps-CFG-lora.safetensors",
41
- "https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-12steps-CFG-lora.safetensors",
42
- "https://huggingface.co/latent-consistency/lcm-lora-sdxl/blob/main/pytorch_lora_weights.safetensors",
43
- ]
44
-
45
-
46
- sdxl_preset_dict = {
47
- "Default": [DEFAULT_DTYPE, "", "Euler a", "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0],
48
- "Bake in standard VAE": [DEFAULT_DTYPE, "https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/blob/main/sdxl.vae.safetensors",
49
- "Euler a", "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0],
50
- "Hyper-SDXL / SPO": [DEFAULT_DTYPE, "https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/blob/main/sdxl.vae.safetensors",
51
- "TCD", "https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-8steps-CFG-lora.safetensors", 1.0,
52
- "https://huggingface.co/SPO-Diffusion-Models/SPO-SDXL_4k-p_10ep_LoRA/blob/main/spo_sdxl_10ep_4k-data_lora_diffusers.safetensors",
53
- 1.0, "", 1.0, "", 1.0, "", 1.0],
54
- }
55
-
56
-
57
- def sdxl_set_presets(preset: str="Default"):
58
- p = []
59
- if preset in sdxl_preset_dict.keys(): p = sdxl_preset_dict[preset]
60
- else: p = sdxl_preset_dict["Default"]
61
- return p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12]
62
-
63
-
64
- sd15_vaes = [
65
- "",
66
- "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt",
67
- "https://huggingface.co/stabilityai/sd-vae-ft-ema-original/resolve/main/vae-ft-ema-560000-ema-pruned.ckpt",
68
- ]
69
-
70
-
71
- sd15_loras = [
72
- "",
73
- "https://huggingface.co/SPO-Diffusion-Models/SPO-SD-v1-5_4k-p_10ep_LoRA/blob/main/spo-sd-v1-5_4k-p_10ep_lora_diffusers.safetensors",
74
- ]
75
-
76
-
77
- sd15_preset_dict = {
78
- "Default": [DEFAULT_DTYPE, "", "Euler", "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0, True],
79
- "Bake in standard VAE": [DEFAULT_DTYPE, "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt",
80
- "Euler", "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0, True],
81
- }
82
-
83
-
84
- def sd15_set_presets(preset: str="Default"):
85
- p = []
86
- if preset in sd15_preset_dict.keys(): p = sd15_preset_dict[preset]
87
- else: p = sd15_preset_dict["Default"]
88
- return p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13]
89
-
90
-
91
- flux_vaes = [
92
- "",
93
- ]
94
-
95
-
96
- flux_loras = [
97
- "",
98
- ]
99
-
100
-
101
- flux_preset_dict = {
102
- "dev": ["bf16", "", "", "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0, "camenduru/FLUX.1-dev-diffusers"],
103
- "schnell": ["bf16", "", "", "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0, "black-forest-labs/FLUX.1-schnell"],
104
- }
105
-
106
-
107
- def flux_set_presets(preset: str="dev"):
108
- p = []
109
- if preset in flux_preset_dict.keys(): p = flux_preset_dict[preset]
110
- else: p = flux_preset_dict["dev"]
111
- return p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13]
112
-
113
-
114
-
115
- sd35_vaes = [
116
- "",
117
- ]
118
-
119
-
120
- sd35_loras = [
121
- "",
122
- ]
123
-
124
-
125
- sd35_preset_dict = {
126
- "Default": ["bf16", "", "", "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0, "adamo1139/stable-diffusion-3.5-large-ungated"],
127
- }
128
-
129
-
130
- def sd35_set_presets(preset: str="dev"):
131
- p = []
132
- if preset in sd35_preset_dict.keys(): p = sd35_preset_dict[preset]
133
- else: p = sd35_preset_dict["Default"]
134
- return p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13]
+from sdutils import get_dtypes, SCHEDULER_CONFIG_MAP
+import gradio as gr
+
+
+DEFAULT_DTYPE = get_dtypes()[0]
+schedulers = list(SCHEDULER_CONFIG_MAP.keys())
+
+
+clips = [
+    "",
+    "openai/clip-vit-large-patch14",
+]
+
+
+t5s = [
+    "",
+    "https://huggingface.co/camenduru/FLUX.1-dev/blob/main/t5xxl_fp8_e4m3fn.safetensors",
+]
+
+
+sdxl_vaes = [
+    "",
+    "https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/blob/main/sdxl.vae.safetensors",
+    "https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/blob/main/sdxl_vae-fp16fix-blessed.safetensors",
+    "https://huggingface.co/John6666/safetensors_converting_test/blob/main/xlVAEC_e7.safetensors",
+    "https://huggingface.co/John6666/safetensors_converting_test/blob/main/xlVAEC_f1.safetensors",
+]
+
+
+sdxl_loras = [
+    "",
+    "https://huggingface.co/SPO-Diffusion-Models/SPO-SDXL_4k-p_10ep_LoRA/blob/main/spo_sdxl_10ep_4k-data_lora_diffusers.safetensors",
+    "https://huggingface.co/wangfuyun/PCM_Weights/blob/main/sdxl/pcm_sdxl_smallcfg_2step_converted.safetensors",
+    "https://huggingface.co/wangfuyun/PCM_Weights/blob/main/sdxl/pcm_sdxl_smallcfg_4step_converted.safetensors",
+    "https://huggingface.co/wangfuyun/PCM_Weights/blob/main/sdxl/pcm_sdxl_smallcfg_8step_converted.safetensors",
+    "https://huggingface.co/wangfuyun/PCM_Weights/blob/main/sdxl/pcm_sdxl_normalcfg_8step_converted.safetensors",
+    "https://huggingface.co/wangfuyun/PCM_Weights/blob/main/sdxl/pcm_sdxl_normalcfg_16step_converted.safetensors",
+    "https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-1step-lora.safetensors",
+    "https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-2steps-lora.safetensors",
+    "https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-4steps-lora.safetensors",
+    "https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-8steps-CFG-lora.safetensors",
+    "https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-12steps-CFG-lora.safetensors",
+    "https://huggingface.co/latent-consistency/lcm-lora-sdxl/blob/main/pytorch_lora_weights.safetensors",
+]
+
+
+sdxl_preset_items = ["dtype", "vae", "scheduler", "lora1", "lora1s", "lora2", "lora2s", "lora3", "lora3s", "lora4", "lora4s", "lora5", "lora5s"]
+sdxl_preset_dict = {
+    "Default": [DEFAULT_DTYPE, "", "Euler a", "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0],
+    "Bake in standard VAE": [DEFAULT_DTYPE, "https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/blob/main/sdxl.vae.safetensors",
+     "Euler a", "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0],
+    "Hyper-SDXL / SPO": [DEFAULT_DTYPE, "https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/blob/main/sdxl.vae.safetensors",
+     "TCD", "https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-8steps-CFG-lora.safetensors", 1.0,
+     "https://huggingface.co/SPO-Diffusion-Models/SPO-SDXL_4k-p_10ep_LoRA/blob/main/spo_sdxl_10ep_4k-data_lora_diffusers.safetensors",
+     1.0, "", 1.0, "", 1.0, "", 1.0],
+}
+
+
+def sdxl_set_presets(preset: str="Default"):
+    p = []
+    if preset in sdxl_preset_dict.keys(): p = sdxl_preset_dict[preset]
+    else: p = sdxl_preset_dict["Default"]
+    if len(p) != len(sdxl_preset_items): raise gr.Error("Invalid preset.")
+    print("Setting SDXL preset:", ", ".join([f"{x}:{y}" for x, y in zip(sdxl_preset_items, p)]))
+    return p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12]
+
+
+sd15_vaes = [
+    "",
+    "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt",
+    "https://huggingface.co/stabilityai/sd-vae-ft-ema-original/resolve/main/vae-ft-ema-560000-ema-pruned.ckpt",
+]
+
+
+sd15_loras = [
+    "",
+    "https://huggingface.co/SPO-Diffusion-Models/SPO-SD-v1-5_4k-p_10ep_LoRA/blob/main/spo-sd-v1-5_4k-p_10ep_lora_diffusers.safetensors",
+]
+
+
+sd15_preset_items = ["dtype", "vae", "scheduler", "lora1", "lora1s", "lora2", "lora2s", "lora3", "lora3s", "lora4", "lora4s", "lora5", "lora5s", "ema"]
+sd15_preset_dict = {
+    "Default": [DEFAULT_DTYPE, "", "Euler", "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0, True],
+    "Bake in standard VAE": [DEFAULT_DTYPE, "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt",
+     "Euler", "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0, True],
+}
+
+
+def sd15_set_presets(preset: str="Default"):
+    p = []
+    if preset in sd15_preset_dict.keys(): p = sd15_preset_dict[preset]
+    else: p = sd15_preset_dict["Default"]
+    if len(p) != len(sd15_preset_items): raise gr.Error("Invalid preset.")
+    print("Setting SD1.5 preset:", ", ".join([f"{x}:{y}" for x, y in zip(sd15_preset_items, p)]))
+    return p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13]
+
+
+flux_vaes = [
+    "",
+]
+
+
+flux_loras = [
+    "",
+]
+
+
+flux_preset_items = ["dtype", "vae", "scheduler", "lora1", "lora1s", "lora2", "lora2s", "lora3", "lora3s", "lora4", "lora4s", "lora5", "lora5s", "base_repo"]
+flux_preset_dict = {
+    "dev": ["bf16", "", "", "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0, "camenduru/FLUX.1-dev-diffusers"],
+    "schnell": ["bf16", "", "", "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0, "black-forest-labs/FLUX.1-schnell"],
+}
+
+
+def flux_set_presets(preset: str="dev"):
+    p = []
+    if preset in flux_preset_dict.keys(): p = flux_preset_dict[preset]
+    else: p = flux_preset_dict["dev"]
+    if len(p) != len(flux_preset_items): raise gr.Error("Invalid preset.")
+    print("Setting FLUX.1 preset:", ", ".join([f"{x}:{y}" for x, y in zip(flux_preset_items, p)]))
+    return p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13]
+
+
+
+sd35_vaes = [
+    "",
+]
+
+
+sd35_loras = [
+    "",
+]
+
+
+sd35_preset_items = ["dtype", "vae", "scheduler", "lora1", "lora1s", "lora2", "lora2s", "lora3", "lora3s", "lora4", "lora4s", "lora5", "lora5s", "base_repo"]
+sd35_preset_dict = {
+    "Default": ["bf16", "", "", "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0, "adamo1139/stable-diffusion-3.5-large-ungated"],
+}
+
+
+def sd35_set_presets(preset: str="dev"):
+    p = []
+    if preset in sd35_preset_dict.keys(): p = sd35_preset_dict[preset]
+    else: p = sd35_preset_dict["Default"]
+    if len(p) != len(sd35_preset_items): raise gr.Error("Invalid preset.")
+    print("Setting SD3.5 preset:", ", ".join([f"{x}:{y}" for x, y in zip(sd35_preset_items, p)]))
+    return p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13]
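
For reference, a minimal sketch of how a preset selector could drive these setters from a Gradio UI. This is illustrative only and not the Space's actual app.py: every component name and the dtype widget below are assumptions. Selecting a preset calls sdxl_set_presets, which returns the 13 values in sdxl_preset_items order (dtype, VAE, scheduler, then five LoRA/scale pairs), so they can be fanned out to the matching input components:

```python
# Illustrative wiring only -- component names are assumptions, not the Space's actual app.py.
import gradio as gr
from sdutils import get_dtypes
from presets import DEFAULT_DTYPE, schedulers, sdxl_vaes, sdxl_loras, sdxl_preset_dict, sdxl_set_presets

with gr.Blocks() as demo:
    preset = gr.Radio(label="Preset", choices=list(sdxl_preset_dict.keys()), value="Default")
    dtype = gr.Dropdown(label="Output dtype", choices=get_dtypes(), value=DEFAULT_DTYPE)
    vae = gr.Dropdown(label="VAE", choices=sdxl_vaes, value="", allow_custom_value=True)
    scheduler = gr.Dropdown(label="Scheduler", choices=schedulers, value="Euler a")
    lora_widgets = []
    for i in range(1, 6):  # five LoRA slots, matching lora1..lora5 in sdxl_preset_items
        lora_widgets.append(gr.Dropdown(label=f"LoRA {i}", choices=sdxl_loras, value="", allow_custom_value=True))
        lora_widgets.append(gr.Number(label=f"LoRA {i} scale", value=1.0))
    # sdxl_set_presets returns 13 values: dtype, vae, scheduler, then (lora, scale) x 5.
    preset.change(sdxl_set_presets, inputs=[preset], outputs=[dtype, vae, scheduler] + lora_widgets)

# demo.launch()
```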
utils.py CHANGED
@@ -130,18 +130,17 @@ def download_thing(directory, url, civitai_api_key="", progress=gr.Progress(trac
     if "drive.google.com" in url:
         original_dir = os.getcwd()
         os.chdir(directory)
-        os.system(f"gdown --fuzzy {url}")
+        subprocess.run(f"gdown --fuzzy {url}", shell=True)
         os.chdir(original_dir)
     elif "huggingface.co" in url:
         url = url.replace("?download=true", "")
         if "/blob/" in url: url = url.replace("/blob/", "/resolve/")
         download_hf_file(directory, url)
     elif "civitai.com" in url:
-        if "?" in url:
-            url = url.split("?")[0]
         if civitai_api_key:
-            url = url + f"?token={civitai_api_key}"
-            os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
+            url = f"'{url}&token={civitai_api_key}'" if "?" in url else f"{url}?token={civitai_api_key}"
+            print(f"Downloading {url}")
+            subprocess.run(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}", shell=True)
         else:
             print("You need an API key to download Civitai models.")
     else:
@@ -152,7 +151,7 @@ def download_thing(directory, url, civitai_api_key="", progress=gr.Progress(trac
 
 def get_local_file_list(dir_path):
     file_list = []
-    for file in Path(dir_path).glob("**/*.*"):
+    for file in Path(dir_path).glob("*/*.*"):
         if file.is_file():
             file_path = str(file)
             file_list.append(file_path)
@@ -181,8 +180,8 @@ def get_download_file(temp_dir, url, civitai_key, progress=gr.Progress(track_tqd
             return ""
         print(f"Download completed: {url}")
         return new_file
-    except Exception:
-        print(f"Download failed: {url}")
+    except Exception as e:
+        print(f"Download failed: {url} {e}")
         return ""
 
 
@@ -198,17 +197,13 @@ def download_repo(repo_id: str, dir_path: str, progress=gr.Progress(track_tqdm=T
         return False
 
 
-def upload_repo(repo_id: str, dir_path: str, is_private: bool, progress=gr.Progress(track_tqdm=True)): # for diffusers repo
+def upload_repo(repo_id: str, dir_path: str, is_private: bool, is_pr: bool=False, progress=gr.Progress(track_tqdm=True)): # for diffusers repo
     hf_token = get_token()
     api = HfApi(token=hf_token)
     try:
         progress(0, desc="Start uploading...")
         api.create_repo(repo_id=repo_id, token=hf_token, private=is_private, exist_ok=True)
-        for path in Path(dir_path).glob("*"):
-            if path.is_dir():
-                api.upload_folder(repo_id=repo_id, folder_path=str(path), path_in_repo=path.name, token=hf_token)
-            elif path.is_file():
-                api.upload_file(repo_id=repo_id, path_or_fileobj=str(path), path_in_repo=path.name, token=hf_token)
+        api.upload_folder(repo_id=repo_id, folder_path=dir_path, path_in_repo="", create_pr=is_pr, token=hf_token)
         progress(1, desc="Uploaded.")
         return get_hf_url(repo_id, "model")
     except Exception as e:
@@ -216,6 +211,18 @@ def upload_repo(repo_id: str, dir_path: str, is_private: bool, progress=gr.Progr
         return ""
 
 
+def gate_repo(repo_id: str, gated_str: str, repo_type: str="model"):
+    hf_token = get_token()
+    api = HfApi(token=hf_token)
+    try:
+        if gated_str == "auto": gated = "auto"
+        elif gated_str == "manual": gated = "manual"
+        else: gated = False
+        api.update_repo_settings(repo_id=repo_id, gated=gated, repo_type=repo_type, token=hf_token)
+    except Exception as e:
+        print(f"Error: Failed to update settings {repo_id}. {e}")
+
+
 HF_SUBFOLDER_NAME = ["None", "user_repo"]
 
 
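
For orientation, a minimal usage sketch of the two upload-side helpers touched in utils.py. It is illustrative only: the repo id and local folder path are placeholders, and it assumes the converter has already written a Diffusers-format folder to disk. upload_repo now pushes the whole folder to the repo root with a single upload_folder call and can open a pull request instead of committing directly, while the new gate_repo maps "auto" / "manual" to Hub gating modes and anything else to ungated:

```python
# Hypothetical example -- repo id and folder path are placeholders, not part of the commit.
from utils import upload_repo, gate_repo

repo_id = "yourid/newrepo"                          # assumed destination repo
local_dir = "./converted/blue_pencil-XL-v7.0.0"     # assumed converter output (Diffusers layout)

# Push the folder; with is_pr=True the upload would be opened as a pull request.
repo_url = upload_repo(repo_id=repo_id, dir_path=local_dir, is_private=False, is_pr=False)
print(repo_url or "upload failed")

# "auto" / "manual" enable Hub gating; any other string (e.g. "False") disables it.
gate_repo(repo_id, gated_str="False", repo_type="model")
```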