import requests
import os
import gradio as gr
from huggingface_hub import whoami, upload_folder, create_repo, upload_file, update_repo_visibility
from slugify import slugify
import re
import uuid
from typing import Optional
import json
from bs4 import BeautifulSoup
TRUSTED_UPLOADERS = ["KappaNeuro", "CiroN2022", "multimodalart", "Norod78", "joachimsallstrom", "blink7630", "e-n-v-y", "DoctorDiffusion", "RalFinger", "artificialguybr"]
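# Fetch model metadata from CivitAI's public REST API (/api/v1/models/{id}),
# returning the parsed JSON or None if the request fails.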
def get_json_data(url):
url_split = url.split('/')
api_url = f"https://civitai.com/api/v1/models/{url_split[4]}"
try:
response = requests.get(api_url)
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException as e:
print(f"Error fetching JSON data: {e}")
return None
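# Gate on CivitAI's safety flags: reject models marked nsfw, let trusted
# uploaders through, and otherwise fail if any example image has nsfwLevel > 5.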
def check_nsfw(json_data, profile):
if json_data["nsfw"]:
return False
print(profile)
if(profile.username in TRUSTED_UPLOADERS):
return True
for model_version in json_data["modelVersions"]:
for image in model_version["images"]:
if image["nsfwLevel"] > 5:
return False
return True
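# Look up an example image's generation metadata via CivitAI's tRPC endpoint
# and return its (prompt, negative_prompt), falling back to empty strings.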
def get_prompts_from_image(image_id):
print("image_id: ", image_id)
url = f'https://civitai.com/api/trpc/image.getGenerationData?input={{"json":{{"id":{image_id}}}}}'
print(url)
response = requests.get(url)
print(response)
prompt = ""
negative_prompt = ""
if response.status_code == 200:
data = response.json()
result = data['result']['data']['json']
if result['meta'] is not None and "prompt" in result['meta']:
prompt = result['meta']['prompt']
if result['meta'] is not None and "negativePrompt" in result['meta']:
negative_prompt = result["meta"]["negativePrompt"]
return prompt, negative_prompt
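# Walk a LORA model's versions, pick the first one with a supported base model,
# queue its primary weight file plus safe example images (with prompts), and
# map the CivitAI base-model name to the matching Hugging Face diffusers repo.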
def extract_info(json_data):
if json_data["type"] == "LORA":
for model_version in json_data["modelVersions"]:
if model_version["baseModel"] in ["SDXL 1.0", "SDXL 0.9", "SD 1.5", "SD 1.4", "SD 2.1", "SD 2.0", "SD 2.0 768", "SD 2.1 768", "SD 3", "Flux.1 D", "Flux.1 S"]:
for file in model_version["files"]:
print(file)
if "primary" in file:
# Start by adding the primary file to the list
urls_to_download = [{"url": file["downloadUrl"], "filename": file["name"], "type": "weightName"}]
# Then append all image URLs to the list
for image in model_version["images"]:
image_id = image["url"].split("/")[-1].split(".")[0]
prompt, negative_prompt = get_prompts_from_image(image_id)
if image["nsfwLevel"] > 5:
                                pass  # skip example images above the allowed NSFW level
else:
urls_to_download.append({
"url": image["url"],
"filename": os.path.basename(image["url"]),
"type": "imageName",
"prompt": prompt, #if "meta" in image and "prompt" in image["meta"] else ""
"negative_prompt": negative_prompt
})
model_mapping = {
"SDXL 1.0": "stabilityai/stable-diffusion-xl-base-1.0",
"SDXL 0.9": "stabilityai/stable-diffusion-xl-base-1.0",
"SD 1.5": "runwayml/stable-diffusion-v1-5",
"SD 1.4": "CompVis/stable-diffusion-v1-4",
"SD 2.1": "stabilityai/stable-diffusion-2-1-base",
"SD 2.0": "stabilityai/stable-diffusion-2-base",
"SD 2.1 768": "stabilityai/stable-diffusion-2-1",
"SD 2.0 768": "stabilityai/stable-diffusion-2",
"SD 3": "stabilityai/stable-diffusion-3-medium-diffusers",
"Flux.1 D": "black-forest-labs/FLUX.1-dev",
"Flux.1 S": "black-forest-labs/FLUX.1-schnell"
}
base_model = model_mapping[model_version["baseModel"]]
info = {
"urls_to_download": urls_to_download,
"id": model_version["id"],
"baseModel": base_model,
"modelId": model_version.get("modelId", ""),
"name": json_data["name"],
"description": json_data["description"],
"trainedWords": model_version["trainedWords"] if "trainedWords" in model_version else [],
"creator": json_data["creator"]["username"],
"tags": json_data["tags"],
"allowNoCredit": json_data["allowNoCredit"],
"allowCommercialUse": json_data["allowCommercialUse"],
"allowDerivatives": json_data["allowDerivatives"],
"allowDifferentLicense": json_data["allowDifferentLicense"]
}
return info
return None
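# Download every queued URL into `folder` and group the resulting filenames
# and prompts by type so create_readme can build the gallery widget.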
def download_files(info, folder="."):
downloaded_files = {
"imageName": [],
"imagePrompt": [],
"imageNegativePrompt": [],
"weightName": []
}
for item in info["urls_to_download"]:
download_file(item["url"], item["filename"], folder)
downloaded_files[item["type"]].append(item["filename"])
if(item["type"] == "imageName"):
prompt_clean = re.sub(r'<.*?>', '', item["prompt"])
negative_prompt_clean = re.sub(r'<.*?>', '', item["negative_prompt"])
downloaded_files["imagePrompt"].append(prompt_clean)
downloaded_files["imageNegativePrompt"].append(negative_prompt_clean)
return downloaded_files
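# Download a single file; on a 401 response, retry once with the CIVITAI_API
# token as a Bearer header before giving up.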
def download_file(url, filename, folder="."):
headers = {}
try:
response = requests.get(url, headers=headers)
response.raise_for_status()
except requests.exceptions.HTTPError as e:
print(e)
if response.status_code == 401:
headers['Authorization'] = f'Bearer {os.environ["CIVITAI_API"]}'
try:
response = requests.get(url, headers=headers)
response.raise_for_status()
except requests.exceptions.RequestException as e:
raise gr.Error(f"Error downloading file: {e}")
else:
raise gr.Error(f"Error downloading file: {e}")
except requests.exceptions.RequestException as e:
raise gr.Error(f"Error downloading file: {e}")
with open(f"{folder}/{filename}", 'wb') as f:
f.write(response.content)
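# End-to-end pipeline for one CivitAI URL: fetch metadata, run the NSFW gate,
# extract the version info and (optionally) download its files.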
def process_url(url, profile, do_download=True, folder="."):
json_data = get_json_data(url)
if json_data:
if check_nsfw(json_data, profile):
info = extract_info(json_data)
if info:
if(do_download):
downloaded_files = download_files(info, folder)
else:
downloaded_files = []
return info, downloaded_files
else:
raise gr.Error("Only SDXL LoRAs are supported for now")
else:
raise gr.Error("This model has content tagged as unsafe by CivitAI")
else:
raise gr.Error("Something went wrong in fetching CivitAI API")
def create_readme(info, downloaded_files, user_repo_id, link_civit=False, is_author=True, folder="."):
readme_content = ""
original_url = f"https://civitai.com/models/{info['modelId']}"
link_civit_disclaimer = f'([CivitAI]({original_url}))'
non_author_disclaimer = f'This model was originally uploaded on [CivitAI]({original_url}), by [{info["creator"]}](https://civitai.com/user/{info["creator"]}/models). The information below was provided by the author on CivitAI:'
default_tags = ["text-to-image", "stable-diffusion", "lora", "diffusers", "template:sd-lora", "migrated"]
civit_tags = [t.replace(":", "") for t in info["tags"] if t not in default_tags]
tags = default_tags + civit_tags
unpacked_tags = "\n- ".join(tags)
trained_words = info['trainedWords'] if 'trainedWords' in info and info['trainedWords'] else []
formatted_words = ', '.join(f'`{word}`' for word in trained_words)
if formatted_words:
trigger_words_section = f"""## Trigger words
You should use {formatted_words} to trigger the image generation.
"""
else:
trigger_words_section = ""
widget_content = ""
for index, (prompt, negative_prompt, image) in enumerate(zip(downloaded_files["imagePrompt"], downloaded_files["imageNegativePrompt"], downloaded_files["imageName"])):
escaped_prompt = prompt.replace("'", "''")
negative_prompt_content = f"""parameters:
negative_prompt: {negative_prompt}
""" if negative_prompt else ""
widget_content += f"""- text: '{escaped_prompt if escaped_prompt else ' ' }'
{negative_prompt_content}
output:
url: >-
{image}
"""
dtype = "torch.bfloat16" if info["baseModel"] == "black-forest-labs/FLUX.1-dev" or info["baseModel"] == "black-forest-labs/FLUX.1-schnell" else "torch.float16"
content = f"""---
license: other
license_name: bespoke-lora-trained-license
license_link: https://multimodal.art/civitai-licenses?allowNoCredit={info["allowNoCredit"]}&allowCommercialUse={info["allowCommercialUse"][0] if info["allowCommercialUse"] else 1}&allowDerivatives={info["allowDerivatives"]}&allowDifferentLicense={info["allowDifferentLicense"]}
tags:
- {unpacked_tags}
base_model: {info["baseModel"]}
instance_prompt: {info['trainedWords'][0] if 'trainedWords' in info and len(info['trainedWords']) > 0 else ''}
widget:
{widget_content}
---
# {info["name"]}
<Gallery />
{non_author_disclaimer if not is_author else ''}
{link_civit_disclaimer if link_civit else ''}
## Model description
{info["description"]}
{trigger_words_section}
## Download model
Weights for this model are available in Safetensors format.
[Download](/{user_repo_id}/tree/main) them in the Files & versions tab.
## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers)
```py
from diffusers import AutoPipelineForText2Image
import torch
device = "cuda" if torch.cuda.is_available() else "cpu"
pipeline = AutoPipelineForText2Image.from_pretrained('{info["baseModel"]}', torch_dtype={dtype}).to(device)
pipeline.load_lora_weights('{user_repo_id}', weight_name='{downloaded_files["weightName"][0]}')
image = pipeline('{prompt if prompt else (formatted_words if formatted_words else 'Your custom prompt')}').images[0]
```
For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
"""
#for index, (image, prompt) in enumerate(zip(downloaded_files["imageName"], downloaded_files["imagePrompt"])):
# if index == 1:
# content += f"## Image examples for the model:\n![Image {index}]({image})\n> {prompt}\n"
# elif index > 1:
# content += f"\n![Image {index}]({image})\n> {prompt}\n"
readme_content += content + "\n"
with open(f"{folder}/README.md", "w") as file:
file.write(readme_content)
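# Fetch a CivitAI creator's profile (including external links) via the tRPC
# user.getCreator endpoint, using browser-like headers and the COOKIE_INFO env var.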
def get_creator(username):
url = f"https://civitai.com/api/trpc/user.getCreator?input=%7B%22json%22%3A%7B%22username%22%3A%22{username}%22%2C%22authed%22%3Atrue%7D%7D"
headers = {
"authority": "civitai.com",
"accept": "*/*",
"accept-language": "en-BR,en;q=0.9,pt-BR;q=0.8,pt;q=0.7,es-ES;q=0.6,es;q=0.5,de-LI;q=0.4,de;q=0.3,en-GB;q=0.2,en-US;q=0.1,sk;q=0.1",
"content-type": "application/json",
"cookie": f'{os.environ["COOKIE_INFO"]}',
"if-modified-since": "Tue, 22 Aug 2023 07:18:52 GMT",
"referer": f"https://civitai.com/user/{username}/models",
"sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Google Chrome\";v=\"114\"",
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": "macOS",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
}
response = requests.get(url, headers=headers)
return response.json()
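# Return the Hugging Face username linked on the CivitAI creator's profile,
# or None if no huggingface.co link is present.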
def extract_huggingface_username(username):
data = get_creator(username)
links = data.get('result', {}).get('data', {}).get('json', {}).get('links', [])
for link in links:
url = link.get('url', '')
if url.startswith('https://huggingface.co/') or url.startswith('https://www.huggingface.co/'):
username = url.split('/')[-1]
return username
return None
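# Verify that the logged-in Hugging Face user matches the account linked on the
# CivitAI creator profile, and update the instructions and buttons accordingly.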
def check_civit_link(profile: Optional[gr.OAuthProfile], url):
info, _ = process_url(url, profile, do_download=False)
hf_username = extract_huggingface_username(info['creator'])
if(profile.username == "multimodalart"):
return '', gr.update(interactive=True), gr.update(visible=False), gr.update(visible=True)
if(not hf_username):
no_username_text = f'If you are {info["creator"]} on CivitAI, hi! Your CivitAI profile seems to not have information about your Hugging Face account. Please visit <a href="https://civitai.com/user/account" target="_blank">https://civitai.com/user/account</a> and include your 🤗 username there, here\'s mine:<br><img width="60%" src="https://i.imgur.com/hCbo9uL.png" /><br>(if you are not {info["creator"]}, you cannot submit their model at this time)'
return no_username_text, gr.update(interactive=False), gr.update(visible=True), gr.update(visible=False)
if(profile.username != hf_username):
        unmatched_username_text = '<h4>Oops, the Hugging Face account in your CivitAI profile seems to be different from the one you are using here. Please visit <a href="https://civitai.com/user/account">https://civitai.com/user/account</a> and update it there to match your Hugging Face account<br><img src="https://i.imgur.com/hCbo9uL.png" /></h4>'
return unmatched_username_text, gr.update(interactive=False), gr.update(visible=True), gr.update(visible=False)
else:
return '', gr.update(interactive=True), gr.update(visible=False), gr.update(visible=True)
def swap_fill(profile: Optional[gr.OAuthProfile]):
if profile is None:
return gr.update(visible=True), gr.update(visible=False)
else:
return gr.update(visible=False), gr.update(visible=True)
def show_output():
return gr.update(visible=True)
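# List all public models for a CivitAI username, following the paginated API's
# metadata.nextPage links, and return one model URL per line for the bulk box.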
def list_civit_models(username):
url = f"https://civitai.com/api/v1/models?username={username}&limit=100"
json_models_list = []
while url:
response = requests.get(url)
data = response.json()
# Add current page items to the list
json_models_list.extend(data.get('items', []))
# Check if there is a nextPage URL in the metadata
metadata = data.get('metadata', {})
url = metadata.get('nextPage', None)
urls = ""
for model in json_models_list:
urls += f'https://civitai.com/models/{model["id"]}/{slugify(model["name"])}\n'
return urls
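# Upload a single CivitAI model: download it into a unique folder, write the
# README, create a private HF repo, upload the folder, then make the repo public.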
def upload_civit_to_hf(profile: Optional[gr.OAuthProfile], oauth_token: gr.OAuthToken, url, link_civit=False):
if not profile.name:
return gr.Error("Are you sure you are logged in?")
folder = str(uuid.uuid4())
os.makedirs(folder, exist_ok=False)
gr.Info(f"Starting download of model {url}")
info, downloaded_files = process_url(url, profile, folder=folder)
    username = profile.username
slug_name = slugify(info["name"])
user_repo_id = f"{profile.username}/{slug_name}"
create_readme(info, downloaded_files, user_repo_id, link_civit, folder=folder)
try:
create_repo(repo_id=user_repo_id, private=True, exist_ok=True, token=oauth_token.token)
gr.Info(f"Starting to upload repo {user_repo_id} to Hugging Face...")
upload_folder(
folder_path=folder,
repo_id=user_repo_id,
repo_type="model",
token=oauth_token.token
)
update_repo_visibility(repo_id=user_repo_id, private=False, token=oauth_token.token)
gr.Info(f"Model uploaded!")
except Exception as e:
print(e)
raise gr.Error("Your Hugging Face Token expired. Log out and in again to upload your models.")
return f'''# Model uploaded to 🤗!
## Access it here [{user_repo_id}](https://huggingface.co/{user_repo_id}) '''
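# Upload each newline-separated URL in sequence, collecting the per-model
# result markdown and warning (instead of aborting) on individual failures.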
def bulk_upload(profile: Optional[gr.OAuthProfile], oauth_token: gr.OAuthToken, urls, link_civit=False):
urls = urls.split("\n")
print(urls)
upload_results = ""
for url in urls:
if(url):
try:
upload_result = upload_civit_to_hf(profile, oauth_token, url, link_civit)
upload_results += upload_result+"\n"
except Exception as e:
gr.Warning(f"Error uploading the model {url}")
return upload_results
css = '''
#login {
width: 100% !important;
margin: 0 auto;
}
#disabled_upload{
opacity: 0.5;
pointer-events:none;
}
'''
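# Gradio UI: the disabled column is shown to logged-out users and the enabled
# one to logged-in users; changing the URL runs check_civit_link to gate the
# upload button, and the accordion handles bulk uploads.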
with gr.Blocks(css=css) as demo:
gr.Markdown('''# Upload your CivitAI LoRA to Hugging Face 🤗
By uploading your LoRAs to Hugging Face you get diffusers compatibility and a free GPU-based Inference Widget, you'll be listed in [LoRA Studio](https://lorastudio.co/models) after a short review, and you can submit your model to [LoRA the Explorer](https://huggingface.co/spaces/multimodalart/LoraTheExplorer) ✨
''')
gr.LoginButton(elem_id="login")
with gr.Column(elem_id="disabled_upload") as disabled_area:
with gr.Row():
submit_source_civit = gr.Textbox(
placeholder="https://civitai.com/models/144684/pixelartredmond-pixel-art-loras-for-sd-xl",
label="CivitAI model URL",
info="URL of the CivitAI LoRA",
)
submit_button_civit = gr.Button("Upload model to Hugging Face and submit", interactive=False)
with gr.Column(visible=False) as enabled_area:
with gr.Column():
submit_source_civit = gr.Textbox(
placeholder="https://civitai.com/models/144684/pixelartredmond-pixel-art-loras-for-sd-xl",
label="CivitAI model URL",
info="URL of the CivitAI LoRA",
)
with gr.Accordion("Bulk upload (bring in multiple LoRAs)", open=False):
                civit_username_to_bulk = gr.Textbox(label="CivitAI username (optional)", info="Type your CivitAI username here to automagically fill the bulk URL list below (optional; you can also paste the links directly)")
submit_bulk_civit = gr.Textbox(
label="CivitAI bulk models URLs",
info="Add one URL per line",
lines=6,
)
link_civit = gr.Checkbox(label="Link back to CivitAI?", value=False)
bulk_button = gr.Button("Bulk upload")
instructions = gr.HTML("")
try_again_button = gr.Button("I have added my HF profile to my account (it may take 1 minute to refresh)", visible=False)
submit_button_civit = gr.Button("Upload model to Hugging Face", interactive=False)
output = gr.Markdown(label="Output progress", visible=False)
demo.load(fn=swap_fill, outputs=[disabled_area, enabled_area], queue=False)
submit_source_civit.change(fn=check_civit_link, inputs=[submit_source_civit], outputs=[instructions, submit_button_civit, try_again_button, submit_button_civit])
civit_username_to_bulk.change(fn=list_civit_models, inputs=[civit_username_to_bulk], outputs=[submit_bulk_civit])
try_again_button.click(fn=check_civit_link, inputs=[submit_source_civit], outputs=[instructions, submit_button_civit, try_again_button, submit_button_civit])
submit_button_civit.click(fn=show_output, inputs=[], outputs=[output]).then(fn=upload_civit_to_hf, inputs=[submit_source_civit, link_civit], outputs=[output])
bulk_button.click(fn=show_output, inputs=[], outputs=[output]).then(fn=bulk_upload, inputs=[submit_bulk_civit, link_civit], outputs=[output])
#gr.LogoutButton(elem_id="logout")
demo.queue(default_concurrency_limit=50)
demo.launch() |