NagisaNao committed on
Commit
d6a8800
1 Parent(s): 88bad94

👽️ Updated part of CivitAi API & ✨Added new 'config' tag to download your config files :3

Browse files
files_cells/notebooks/en/downloading_en.ipynb CHANGED
@@ -318,7 +318,8 @@
318
  " \"embed\": embeddings_dir,\n",
319
  " \"extension\": extensions_dir,\n",
320
  " \"control\": control_dir,\n",
321
- " \"adetailer\": adetailer_dir\n",
 
322
  "}\n",
323
  "\n",
324
  "extension_repo = []\n",
@@ -347,49 +348,136 @@
347
  " print(f\"\\033[33mSAVE DIR: \\033[34m{dst_dir}\")\n",
348
  " print(f\"\\033[33mFILE NAME: \\033[34m{file_name}\\033[0m\")\n",
349
  "\n",
350
- "''' Get Image Preview | CivitAi '''\n",
351
- "\n",
352
- "def get_data_from_api(model_id):\n",
353
- " \"\"\"Fetch model data from the API\"\"\"\n",
354
- " endpoint_url = f\"https://civitai.com/api/v1/model-versions/{model_id}\"\n",
355
- " headers = {\"Content-Type\": \"application/json\"}\n",
356
- " try:\n",
357
- " response = requests.get(endpoint_url, headers=headers)\n",
358
- " response.raise_for_status()\n",
359
- " return response.json()\n",
360
- " except requests.exceptions.RequestException as e:\n",
361
- " print(f\"An error occurred: {e}\")\n",
362
- " return None\n",
363
- "\n",
364
- "def extract_model_info(data, url):\n",
365
- " \"\"\"Extract model information based on URL\"\"\"\n",
366
- " if 'type=' in url:\n",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
367
  " model_type = parse_qs(urlparse(url).query).get('type', [''])[0]\n",
368
- " model_name = data['files'][1]['name']\n",
 
 
 
369
  " else:\n",
370
  " model_type = data['model']['type']\n",
371
  " model_name = data['files'][0]['name']\n",
372
  "\n",
373
- " # Finding a safe image: less than level 4 | Kaggle\n",
374
- " if env == 'Kaggle':\n",
375
- " image_url = next((image['url'] for image in data['images'] if image['nsfwLevel'] < 4), None)\n",
 
 
 
 
 
 
 
 
 
 
376
  " else:\n",
377
- " image_url = data['images'][0]['url']\n",
378
  "\n",
379
- " return model_type, model_name, image_url\n",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
380
  "\n",
381
- "def gen_preview_filename(model_name, image_url):\n",
382
- " \"\"\"Generate a preview filename\"\"\"\n",
383
- " name = model_name.split('.')\n",
384
- " img_exts = image_url.split('.')\n",
385
- " return f\"{name[0]}.preview.{img_exts[-1]}\"\n",
386
  "\n",
387
- "''' main download code '''\n",
 
 
 
 
 
 
 
 
 
388
  "\n",
389
  "def handle_manual(url):\n",
390
  " url_parts = url.split(':', 1)\n",
391
- " prefix = url_parts[0]\n",
392
- " path = url_parts[1]\n",
393
  "\n",
394
  " file_name_match = re.search(r'\\[(.*?)\\]', path)\n",
395
  " file_name = file_name_match.group(1) if file_name_match else None\n",
@@ -411,40 +499,38 @@
411
  " basename = url.split(\"/\")[-1] if file_name is None else file_name\n",
412
  " header_option = f\"--header={user_header}\"\n",
413
  "\n",
414
- " # ==== CivitAi API+ ====\n",
415
- " support_types = ('Checkpoint', 'Model', 'TextualInversion', 'LORA') # for dl preview image\n",
416
- " civitai_token = \"62c0c5956b2f9defbd844d754000180b\"\n",
417
  "\n",
418
- " if 'civitai' in url:\n",
419
- " url = f\"{url}{'&' if '?' in url else '?'}token={civitai_token}\"\n",
420
- " model_id = url.split('/')[-1].split('?')[0]\n",
421
- " clean_url = re.sub(r'[?&]token=[^&]*', '', url) # hide token\n",
422
- "\n",
423
- " data = get_data_from_api(model_id)\n",
424
- " if data:\n",
425
- " model_type, model_name, image_url = extract_model_info(data, url)\n",
426
- "\n",
427
- " if any(t in model_type for t in support_types):\n",
428
- " if model_name and image_url:\n",
429
- " image_file_name = gen_preview_filename(model_name if not file_name else file_name, image_url)\n",
430
- " with capture.capture_output() as cap:\n",
431
- " !aria2c {aria2_args} -d {dst_dir} -o {image_file_name} '{image_url}'\n",
432
- " del cap\n",
433
- " file_name = file_name or model_name\n",
434
- " else:\n",
435
- " clean_url = url\n",
436
  "\n",
437
  " \"\"\" Formatted info output \"\"\"\n",
438
  " model_name_or_basename = file_name if not 'huggingface' in url else basename\n",
439
  " format_output(clean_url or url, dst_dir, model_name_or_basename)\n",
440
  "\n",
441
- " print(\"\\033[31m[Data Info]:\\033[0m Failed to retrieve data from the API.\\n\") if 'civitai' in url and not data else None\n",
442
- " if 'civitai' in url and data and any(t in model_type for t in support_types) and (locals().get('image_file_name') or ''):\n",
443
- " print(f\"\\033[32m[Preview DL]:\\033[0m {image_file_name} - {image_url}\\n\")\n",
 
 
444
  " # =====================\n",
445
  "\n",
 
 
 
 
446
  " # -- GDrive --\n",
447
- " if 'drive.google' in url:\n",
448
  " try:\n",
449
  " have_drive_link\n",
450
  " except:\n",
@@ -461,43 +547,13 @@
461
  "\n",
462
  " # -- Hugging Face --\n",
463
  " elif 'huggingface' in url:\n",
464
- " if '/blob/' in url:\n",
465
- " url = url.replace('/blob/', '/resolve/')\n",
466
- " !aria2c {header_option} {aria2_args} -d {dst_dir} -o {basename} '{url}'\n",
467
  "\n",
468
  " # -- Other --\n",
469
  " elif 'http' in url:\n",
470
- " !aria2c {aria2_args} -d {dst_dir} {'-o' + file_name if file_name else ''} '{url}'\n",
471
- "\n",
472
- "def download(url):\n",
473
- " links_and_paths = url.split(',')\n",
474
- "\n",
475
- " for link_or_path in links_and_paths:\n",
476
- " link_or_path = link_or_path.strip()\n",
477
- " if not link_or_path:\n",
478
- " continue\n",
479
- " if any(link_or_path.startswith(prefix.lower()) for prefix in prefixes):\n",
480
- " handle_manual(link_or_path)\n",
481
- " continue\n",
482
- "\n",
483
- " url, dst_dir, file_name = link_or_path.split()\n",
484
- " manual_download(url, dst_dir, file_name)\n",
485
- "\n",
486
- " unpucking_zip_files()\n",
487
- "\n",
488
- "# unpucking zip files\n",
489
- "def unpucking_zip_files():\n",
490
- " for directory in directories:\n",
491
- " for root, dirs, files in os.walk(directory):\n",
492
- " for file in files:\n",
493
- " if file.endswith(\".zip\"):\n",
494
- " zip_path = os.path.join(root, file)\n",
495
- " extract_path = os.path.splitext(zip_path)[0]\n",
496
- " with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n",
497
- " zip_ref.extractall(extract_path)\n",
498
- " os.remove(zip_path)\n",
499
  "\n",
500
- "''' submodels - added urls '''\n",
501
  "\n",
502
  "def add_submodels(selection, num_selection, model_dict, dst_dir):\n",
503
  " if selection == \"none\":\n",
 
318
  " \"embed\": embeddings_dir,\n",
319
  " \"extension\": extensions_dir,\n",
320
  " \"control\": control_dir,\n",
321
+ " \"adetailer\": adetailer_dir,\n",
322
+ " \"config\": webui_path\n",
323
  "}\n",
324
  "\n",
325
  "extension_repo = []\n",
 
348
  " print(f\"\\033[33mSAVE DIR: \\033[34m{dst_dir}\")\n",
349
  " print(f\"\\033[33mFILE NAME: \\033[34m{file_name}\\033[0m\")\n",
350
  "\n",
351
+ "''' GET CivitAi API - DATA '''\n",
352
+ "\n",
353
+ "def strip_(url, file_name=None):\n",
354
+ " if 'github.com' in url:\n",
355
+ " if '/blob/' in url:\n",
356
+ " url = url.replace('/blob/', '/raw/')\n",
357
+ "\n",
358
+ " elif \"civitai.com\" in url:\n",
359
+ " return CivitAi_API(url, file_name)\n",
360
+ "\n",
361
+ " elif \"huggingface.co\" in url:\n",
362
+ " if '/blob/' in url:\n",
363
+ " url = url.replace('/blob/', '/resolve/')\n",
364
+ " if '?' in url:\n",
365
+ " url = url.split('?')[0]\n",
366
+ "\n",
367
+ " return url\n",
368
+ "\n",
369
+ "def CivitAi_API(url, file_name=None):\n",
370
+ " support_types = ('Checkpoint', 'Model', 'TextualInversion', 'LORA')\n",
371
+ " civitai_token = \"62c0c5956b2f9defbd844d754000180b\"\n",
372
+ "\n",
373
+ " if '?token=' in url:\n",
374
+ " url = url.split('?token=')[0]\n",
375
+ " if '?type=' in url:\n",
376
+ " url = url.replace('?type=', f'?token={civitai_token}&type=')\n",
377
+ " else:\n",
378
+ " url = f\"{url}?token={civitai_token}\"\n",
379
+ "\n",
380
+ " # Determine model or version id\n",
381
+ " if \"civitai.com/models/\" in url:\n",
382
+ " if '?modelVersionId=' in url:\n",
383
+ " version_id = url.split('?modelVersionId=')[1]\n",
384
+ " response = requests.get(f\"https://civitai.com/api/v1/model-versions/{version_id}\")\n",
385
+ " # print(f\"end - https://civitai.com/api/v1/model-versions/{version_id}\")\n",
386
+ " else:\n",
387
+ " model_id = url.split('/models/')[1].split('/')[0]\n",
388
+ " response = requests.get(f\"https://civitai.com/api/v1/models/{model_id}\")\n",
389
+ " # print(f\"end - https://civitai.com/api/v1/models/{model_id}\")\n",
390
+ " else:\n",
391
+ " version_id = url.split('/models/')[1].split('/')[0]\n",
392
+ " response = requests.get(f\"https://civitai.com/api/v1/model-versions/{version_id}\")\n",
393
+ " # print(f\"end - https://civitai.com/api/v1/model-versions/{version_id}\")\n",
394
+ "\n",
395
+ " data = response.json()\n",
396
+ "\n",
397
+ " if response.status_code != 200:\n",
398
+ " return None, None, None, None, None, None, None\n",
399
+ "\n",
400
+ " # Define model type and name\n",
401
+ " if \"civitai.com/models/\" in url:\n",
402
+ " if '?modelVersionId=' in url:\n",
403
+ " model_type = data['model']['type']\n",
404
+ " model_name = data['files'][0]['name']\n",
405
+ " else:\n",
406
+ " model_type = data['type']\n",
407
+ " model_name = data['modelVersions'][0]['files'][0]['name']\n",
408
+ " elif 'type=' in url:\n",
409
  " model_type = parse_qs(urlparse(url).query).get('type', [''])[0]\n",
410
+ " if 'model' in model_type.lower():\n",
411
+ " model_name = data['files'][0]['name']\n",
412
+ " else:\n",
413
+ " model_name = data['files'][1]['name']\n",
414
  " else:\n",
415
  " model_type = data['model']['type']\n",
416
  " model_name = data['files'][0]['name']\n",
417
  "\n",
418
+ " model_name = file_name or model_name\n",
419
+ "\n",
420
+ " # Determine DownloadUrl\n",
421
+ " if \"civitai.com/models/\" in url:\n",
422
+ " if '?modelVersionId=' in url:\n",
423
+ " download_url = data.get('downloadUrl')\n",
424
+ " else:\n",
425
+ " download_url = data[\"modelVersions\"][0].get(\"downloadUrl\", \"\")\n",
426
+ " elif 'type=' in url:\n",
427
+ " if any(t.lower() in model_type.lower() for t in support_types):\n",
428
+ " download_url = data['files'][0]['downloadUrl']\n",
429
+ " else:\n",
430
+ " download_url = data['files'][1]['downloadUrl']\n",
431
  " else:\n",
432
+ " download_url = data.get('downloadUrl')\n",
433
  "\n",
434
+ " clean_url = re.sub(r'[?&]token=[^&]*', '', download_url) # hide token\n",
435
+ "\n",
436
+ " # Find a safe image: level less than 4 | Kaggle\n",
437
+ " image_url, image_name = None, None\n",
438
+ " if any(t in model_type for t in support_types):\n",
439
+ " try:\n",
440
+ " images = data.get('images') or data['modelVersions'][0].get('images', [])\n",
441
+ " if env == 'Kaggle':\n",
442
+ " image_url = next((image['url'] for image in images if image['nsfwLevel'] < 4), None)\n",
443
+ " else:\n",
444
+ " image_url = images[0]['url'] if images else None\n",
445
+ " except KeyError:\n",
446
+ " pass\n",
447
+ "\n",
448
+ " # Generate a name to save the image\n",
449
+ " image_name = f\"{model_name.split('.')[0]}.preview.{image_url.split('.')[-1]}\" if image_url else None\n",
450
+ "\n",
451
+ " return f\"{download_url}{'&' if '?' in download_url else '?'}token={civitai_token}\", clean_url, model_type, model_name, image_url, image_name, data\n",
452
+ "\n",
453
+ "''' Main Download Code '''\n",
454
+ "\n",
455
+ "def download(url):\n",
456
+ " links_and_paths = [link_or_path.strip() for link_or_path in url.split(',') if link_or_path.strip()]\n",
457
+ "\n",
458
+ " for link_or_path in links_and_paths:\n",
459
+ " if any(link_or_path.lower().startswith(prefix) for prefix in prefixes):\n",
460
+ " handle_manual(link_or_path)\n",
461
+ " else:\n",
462
+ " url, dst_dir, file_name = link_or_path.split()\n",
463
+ " manual_download(url, dst_dir, file_name)\n",
464
  "\n",
465
+ " unpack_zip_files()\n",
 
 
 
 
466
  "\n",
467
+ "def unpack_zip_files():\n",
468
+ " for directory in directories:\n",
469
+ " for root, _, files in os.walk(directory):\n",
470
+ " for file in files:\n",
471
+ " if file.endswith(\".zip\"):\n",
472
+ " zip_path = os.path.join(root, file)\n",
473
+ " extract_path = os.path.splitext(zip_path)[0]\n",
474
+ " with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n",
475
+ " zip_ref.extractall(extract_path)\n",
476
+ " os.remove(zip_path)\n",
477
  "\n",
478
  "def handle_manual(url):\n",
479
  " url_parts = url.split(':', 1)\n",
480
+ " prefix, path = url_parts[0], url_parts[1]\n",
 
481
  "\n",
482
  " file_name_match = re.search(r'\\[(.*?)\\]', path)\n",
483
  " file_name = file_name_match.group(1) if file_name_match else None\n",
 
499
  " basename = url.split(\"/\")[-1] if file_name is None else file_name\n",
500
  " header_option = f\"--header={user_header}\"\n",
501
  "\n",
502
+ " if 'github.com' in url:\n",
503
+ " url = strip_(url)\n",
 
504
  "\n",
505
+ " # -- CivitAi APi+ V2 --\n",
506
+ " elif 'civitai' in url:\n",
507
+ " url, clean_url, model_type, file_name, image_url, image_name, data = strip_(url, file_name)\n",
508
+ "\n",
509
+ " if image_url and image_name:\n",
510
+ " with capture.capture_output() as cap:\n",
511
+ " !aria2c {aria2_args} -d {dst_dir} -o '{image_name}' '{image_url}'\n",
512
+ " del cap\n",
513
+ "\n",
514
+ " elif \"huggingface.co\" in url:\n",
515
+ " clean_url = strip_(url)\n",
 
 
 
 
 
 
 
516
  "\n",
517
  " \"\"\" Formatted info output \"\"\"\n",
518
  " model_name_or_basename = file_name if not 'huggingface' in url else basename\n",
519
  " format_output(clean_url or url, dst_dir, model_name_or_basename)\n",
520
  "\n",
521
+ " # ## -- for my tests --\n",
522
+ " # print(url, dst_dir, model_name_or_basename)\n",
523
+ " print(f\"\\033[31m[Data Info]:\\033[0m Failed to retrieve data from the API.\\n\") if 'civitai' in url and not data else None\n",
524
+ " if 'civitai' in url and data and image_name:\n",
525
+ " print(f\"\\033[32m[Preview DL]:\\033[0m {image_name} - {image_url}\\n\")\n",
526
  " # =====================\n",
527
  "\n",
528
+ " # # -- Git Hub --\n",
529
+ " if 'github.com' in url or 'githubusercontent.com' in url:\n",
530
+ " !aria2c {aria2_args} -d {dst_dir} -o '{basename}' '{url}'\n",
531
+ "\n",
532
  " # -- GDrive --\n",
533
+ " elif 'drive.google' in url:\n",
534
  " try:\n",
535
  " have_drive_link\n",
536
  " except:\n",
 
547
  "\n",
548
  " # -- Hugging Face --\n",
549
  " elif 'huggingface' in url:\n",
550
+ " !aria2c {header_option} {aria2_args} -d {dst_dir} -o '{basename}' '{url}'\n",
 
 
551
  "\n",
552
  " # -- Other --\n",
553
  " elif 'http' in url:\n",
554
+ " !aria2c {aria2_args} -d {dst_dir} '{'-o' + file_name if file_name else ''}' '{url}'\n",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
555
  "\n",
556
+ "''' SubModels - Added URLs '''\n",
557
  "\n",
558
  "def add_submodels(selection, num_selection, model_dict, dst_dir):\n",
559
  " if selection == \"none\":\n",
files_cells/notebooks/ru/downloading_ru.ipynb CHANGED
@@ -318,7 +318,8 @@
318
  " \"embed\": embeddings_dir,\n",
319
  " \"extension\": extensions_dir,\n",
320
  " \"control\": control_dir,\n",
321
- " \"adetailer\": adetailer_dir\n",
 
322
  "}\n",
323
  "\n",
324
  "extension_repo = []\n",
@@ -347,49 +348,136 @@
347
  " print(f\"\\033[33mSAVE DIR: \\033[34m{dst_dir}\")\n",
348
  " print(f\"\\033[33mFILE NAME: \\033[34m{file_name}\\033[0m\")\n",
349
  "\n",
350
- "''' Get Image Preview | CivitAi '''\n",
351
- "\n",
352
- "def get_data_from_api(model_id):\n",
353
- " \"\"\"Fetch model data from the API\"\"\"\n",
354
- " endpoint_url = f\"https://civitai.com/api/v1/model-versions/{model_id}\"\n",
355
- " headers = {\"Content-Type\": \"application/json\"}\n",
356
- " try:\n",
357
- " response = requests.get(endpoint_url, headers=headers)\n",
358
- " response.raise_for_status()\n",
359
- " return response.json()\n",
360
- " except requests.exceptions.RequestException as e:\n",
361
- " print(f\"An error occurred: {e}\")\n",
362
- " return None\n",
363
- "\n",
364
- "def extract_model_info(data, url):\n",
365
- " \"\"\"Extract model information based on URL\"\"\"\n",
366
- " if 'type=' in url:\n",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
367
  " model_type = parse_qs(urlparse(url).query).get('type', [''])[0]\n",
368
- " model_name = data['files'][1]['name']\n",
 
 
 
369
  " else:\n",
370
  " model_type = data['model']['type']\n",
371
  " model_name = data['files'][0]['name']\n",
372
  "\n",
373
- " # Finding a safe image: less than level 4 | Kaggle\n",
374
- " if env == 'Kaggle':\n",
375
- " image_url = next((image['url'] for image in data['images'] if image['nsfwLevel'] < 4), None)\n",
 
 
 
 
 
 
 
 
 
 
376
  " else:\n",
377
- " image_url = data['images'][0]['url']\n",
378
  "\n",
379
- " return model_type, model_name, image_url\n",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
380
  "\n",
381
- "def gen_preview_filename(model_name, image_url):\n",
382
- " \"\"\"Generate a preview filename\"\"\"\n",
383
- " name = model_name.split('.')\n",
384
- " img_exts = image_url.split('.')\n",
385
- " return f\"{name[0]}.preview.{img_exts[-1]}\"\n",
386
  "\n",
387
- "''' main download code '''\n",
 
 
 
 
 
 
 
 
 
388
  "\n",
389
  "def handle_manual(url):\n",
390
  " url_parts = url.split(':', 1)\n",
391
- " prefix = url_parts[0]\n",
392
- " path = url_parts[1]\n",
393
  "\n",
394
  " file_name_match = re.search(r'\\[(.*?)\\]', path)\n",
395
  " file_name = file_name_match.group(1) if file_name_match else None\n",
@@ -411,40 +499,38 @@
411
  " basename = url.split(\"/\")[-1] if file_name is None else file_name\n",
412
  " header_option = f\"--header={user_header}\"\n",
413
  "\n",
414
- " # ==== CivitAi API+ ====\n",
415
- " support_types = ('Checkpoint', 'Model', 'TextualInversion', 'LORA') # for dl preview image\n",
416
- " civitai_token = \"62c0c5956b2f9defbd844d754000180b\"\n",
417
  "\n",
418
- " if 'civitai' in url:\n",
419
- " url = f\"{url}{'&' if '?' in url else '?'}token={civitai_token}\"\n",
420
- " model_id = url.split('/')[-1].split('?')[0]\n",
421
- " clean_url = re.sub(r'[?&]token=[^&]*', '', url) # hide token\n",
422
- "\n",
423
- " data = get_data_from_api(model_id)\n",
424
- " if data:\n",
425
- " model_type, model_name, image_url = extract_model_info(data, url)\n",
426
- "\n",
427
- " if any(t in model_type for t in support_types):\n",
428
- " if model_name and image_url:\n",
429
- " image_file_name = gen_preview_filename(model_name if not file_name else file_name, image_url)\n",
430
- " with capture.capture_output() as cap:\n",
431
- " !aria2c {aria2_args} -d {dst_dir} -o {image_file_name} '{image_url}'\n",
432
- " del cap\n",
433
- " file_name = file_name or model_name\n",
434
- " else:\n",
435
- " clean_url = url\n",
436
  "\n",
437
  " \"\"\" Formatted info output \"\"\"\n",
438
  " model_name_or_basename = file_name if not 'huggingface' in url else basename\n",
439
  " format_output(clean_url or url, dst_dir, model_name_or_basename)\n",
440
  "\n",
441
- " print(\"\\033[31m[Data Info]:\\033[0m Failed to retrieve data from the API.\\n\") if 'civitai' in url and not data else None\n",
442
- " if 'civitai' in url and data and any(t in model_type for t in support_types) and (locals().get('image_file_name') or ''):\n",
443
- " print(f\"\\033[32m[Preview DL]:\\033[0m {image_file_name} - {image_url}\\n\")\n",
 
 
444
  " # =====================\n",
445
  "\n",
 
 
 
 
446
  " # -- GDrive --\n",
447
- " if 'drive.google' in url:\n",
448
  " try:\n",
449
  " have_drive_link\n",
450
  " except:\n",
@@ -461,43 +547,13 @@
461
  "\n",
462
  " # -- Hugging Face --\n",
463
  " elif 'huggingface' in url:\n",
464
- " if '/blob/' in url:\n",
465
- " url = url.replace('/blob/', '/resolve/')\n",
466
- " !aria2c {header_option} {aria2_args} -d {dst_dir} -o {basename} '{url}'\n",
467
  "\n",
468
  " # -- Other --\n",
469
  " elif 'http' in url:\n",
470
- " !aria2c {aria2_args} -d {dst_dir} {'-o' + file_name if file_name else ''} '{url}'\n",
471
- "\n",
472
- "def download(url):\n",
473
- " links_and_paths = url.split(',')\n",
474
- "\n",
475
- " for link_or_path in links_and_paths:\n",
476
- " link_or_path = link_or_path.strip()\n",
477
- " if not link_or_path:\n",
478
- " continue\n",
479
- " if any(link_or_path.startswith(prefix.lower()) for prefix in prefixes):\n",
480
- " handle_manual(link_or_path)\n",
481
- " continue\n",
482
- "\n",
483
- " url, dst_dir, file_name = link_or_path.split()\n",
484
- " manual_download(url, dst_dir, file_name)\n",
485
- "\n",
486
- " unpucking_zip_files()\n",
487
- "\n",
488
- "# unpucking zip files\n",
489
- "def unpucking_zip_files():\n",
490
- " for directory in directories:\n",
491
- " for root, dirs, files in os.walk(directory):\n",
492
- " for file in files:\n",
493
- " if file.endswith(\".zip\"):\n",
494
- " zip_path = os.path.join(root, file)\n",
495
- " extract_path = os.path.splitext(zip_path)[0]\n",
496
- " with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n",
497
- " zip_ref.extractall(extract_path)\n",
498
- " os.remove(zip_path)\n",
499
  "\n",
500
- "''' submodels - added urls '''\n",
501
  "\n",
502
  "def add_submodels(selection, num_selection, model_dict, dst_dir):\n",
503
  " if selection == \"none\":\n",
 
318
  " \"embed\": embeddings_dir,\n",
319
  " \"extension\": extensions_dir,\n",
320
  " \"control\": control_dir,\n",
321
+ " \"adetailer\": adetailer_dir,\n",
322
+ " \"config\": webui_path\n",
323
  "}\n",
324
  "\n",
325
  "extension_repo = []\n",
 
348
  " print(f\"\\033[33mSAVE DIR: \\033[34m{dst_dir}\")\n",
349
  " print(f\"\\033[33mFILE NAME: \\033[34m{file_name}\\033[0m\")\n",
350
  "\n",
351
+ "''' GET CivitAi API - DATA '''\n",
352
+ "\n",
353
+ "def strip_(url, file_name=None):\n",
354
+ " if 'github.com' in url:\n",
355
+ " if '/blob/' in url:\n",
356
+ " url = url.replace('/blob/', '/raw/')\n",
357
+ "\n",
358
+ " elif \"civitai.com\" in url:\n",
359
+ " return CivitAi_API(url, file_name)\n",
360
+ "\n",
361
+ " elif \"huggingface.co\" in url:\n",
362
+ " if '/blob/' in url:\n",
363
+ " url = url.replace('/blob/', '/resolve/')\n",
364
+ " if '?' in url:\n",
365
+ " url = url.split('?')[0]\n",
366
+ "\n",
367
+ " return url\n",
368
+ "\n",
369
+ "def CivitAi_API(url, file_name=None):\n",
370
+ " support_types = ('Checkpoint', 'Model', 'TextualInversion', 'LORA')\n",
371
+ " civitai_token = \"62c0c5956b2f9defbd844d754000180b\"\n",
372
+ "\n",
373
+ " if '?token=' in url:\n",
374
+ " url = url.split('?token=')[0]\n",
375
+ " if '?type=' in url:\n",
376
+ " url = url.replace('?type=', f'?token={civitai_token}&type=')\n",
377
+ " else:\n",
378
+ " url = f\"{url}?token={civitai_token}\"\n",
379
+ "\n",
380
+ " # Determine model or version id\n",
381
+ " if \"civitai.com/models/\" in url:\n",
382
+ " if '?modelVersionId=' in url:\n",
383
+ " version_id = url.split('?modelVersionId=')[1]\n",
384
+ " response = requests.get(f\"https://civitai.com/api/v1/model-versions/{version_id}\")\n",
385
+ " # print(f\"end - https://civitai.com/api/v1/model-versions/{version_id}\")\n",
386
+ " else:\n",
387
+ " model_id = url.split('/models/')[1].split('/')[0]\n",
388
+ " response = requests.get(f\"https://civitai.com/api/v1/models/{model_id}\")\n",
389
+ " # print(f\"end - https://civitai.com/api/v1/models/{model_id}\")\n",
390
+ " else:\n",
391
+ " version_id = url.split('/models/')[1].split('/')[0]\n",
392
+ " response = requests.get(f\"https://civitai.com/api/v1/model-versions/{version_id}\")\n",
393
+ " # print(f\"end - https://civitai.com/api/v1/model-versions/{version_id}\")\n",
394
+ "\n",
395
+ " data = response.json()\n",
396
+ "\n",
397
+ " if response.status_code != 200:\n",
398
+ " return None, None, None, None, None, None, None\n",
399
+ "\n",
400
+ " # Define model type and name\n",
401
+ " if \"civitai.com/models/\" in url:\n",
402
+ " if '?modelVersionId=' in url:\n",
403
+ " model_type = data['model']['type']\n",
404
+ " model_name = data['files'][0]['name']\n",
405
+ " else:\n",
406
+ " model_type = data['type']\n",
407
+ " model_name = data['modelVersions'][0]['files'][0]['name']\n",
408
+ " elif 'type=' in url:\n",
409
  " model_type = parse_qs(urlparse(url).query).get('type', [''])[0]\n",
410
+ " if 'model' in model_type.lower():\n",
411
+ " model_name = data['files'][0]['name']\n",
412
+ " else:\n",
413
+ " model_name = data['files'][1]['name']\n",
414
  " else:\n",
415
  " model_type = data['model']['type']\n",
416
  " model_name = data['files'][0]['name']\n",
417
  "\n",
418
+ " model_name = file_name or model_name\n",
419
+ "\n",
420
+ " # Determine DownloadUrl\n",
421
+ " if \"civitai.com/models/\" in url:\n",
422
+ " if '?modelVersionId=' in url:\n",
423
+ " download_url = data.get('downloadUrl')\n",
424
+ " else:\n",
425
+ " download_url = data[\"modelVersions\"][0].get(\"downloadUrl\", \"\")\n",
426
+ " elif 'type=' in url:\n",
427
+ " if any(t.lower() in model_type.lower() for t in support_types):\n",
428
+ " download_url = data['files'][0]['downloadUrl']\n",
429
+ " else:\n",
430
+ " download_url = data['files'][1]['downloadUrl']\n",
431
  " else:\n",
432
+ " download_url = data.get('downloadUrl')\n",
433
  "\n",
434
+ " clean_url = re.sub(r'[?&]token=[^&]*', '', download_url) # hide token\n",
435
+ "\n",
436
+ " # Find a safe image: level less than 4 | Kaggle\n",
437
+ " image_url, image_name = None, None\n",
438
+ " if any(t in model_type for t in support_types):\n",
439
+ " try:\n",
440
+ " images = data.get('images') or data['modelVersions'][0].get('images', [])\n",
441
+ " if env == 'Kaggle':\n",
442
+ " image_url = next((image['url'] for image in images if image['nsfwLevel'] < 4), None)\n",
443
+ " else:\n",
444
+ " image_url = images[0]['url'] if images else None\n",
445
+ " except KeyError:\n",
446
+ " pass\n",
447
+ "\n",
448
+ " # Generate a name to save the image\n",
449
+ " image_name = f\"{model_name.split('.')[0]}.preview.{image_url.split('.')[-1]}\" if image_url else None\n",
450
+ "\n",
451
+ " return f\"{download_url}{'&' if '?' in download_url else '?'}token={civitai_token}\", clean_url, model_type, model_name, image_url, image_name, data\n",
452
+ "\n",
453
+ "''' Main Download Code '''\n",
454
+ "\n",
455
+ "def download(url):\n",
456
+ " links_and_paths = [link_or_path.strip() for link_or_path in url.split(',') if link_or_path.strip()]\n",
457
+ "\n",
458
+ " for link_or_path in links_and_paths:\n",
459
+ " if any(link_or_path.lower().startswith(prefix) for prefix in prefixes):\n",
460
+ " handle_manual(link_or_path)\n",
461
+ " else:\n",
462
+ " url, dst_dir, file_name = link_or_path.split()\n",
463
+ " manual_download(url, dst_dir, file_name)\n",
464
  "\n",
465
+ " unpack_zip_files()\n",
 
 
 
 
466
  "\n",
467
+ "def unpack_zip_files():\n",
468
+ " for directory in directories:\n",
469
+ " for root, _, files in os.walk(directory):\n",
470
+ " for file in files:\n",
471
+ " if file.endswith(\".zip\"):\n",
472
+ " zip_path = os.path.join(root, file)\n",
473
+ " extract_path = os.path.splitext(zip_path)[0]\n",
474
+ " with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n",
475
+ " zip_ref.extractall(extract_path)\n",
476
+ " os.remove(zip_path)\n",
477
  "\n",
478
  "def handle_manual(url):\n",
479
  " url_parts = url.split(':', 1)\n",
480
+ " prefix, path = url_parts[0], url_parts[1]\n",
 
481
  "\n",
482
  " file_name_match = re.search(r'\\[(.*?)\\]', path)\n",
483
  " file_name = file_name_match.group(1) if file_name_match else None\n",
 
499
  " basename = url.split(\"/\")[-1] if file_name is None else file_name\n",
500
  " header_option = f\"--header={user_header}\"\n",
501
  "\n",
502
+ " if 'github.com' in url:\n",
503
+ " url = strip_(url)\n",
 
504
  "\n",
505
+ " # -- CivitAi APi+ V2 --\n",
506
+ " elif 'civitai' in url:\n",
507
+ " url, clean_url, model_type, file_name, image_url, image_name, data = strip_(url, file_name)\n",
508
+ "\n",
509
+ " if image_url and image_name:\n",
510
+ " with capture.capture_output() as cap:\n",
511
+ " !aria2c {aria2_args} -d {dst_dir} -o '{image_name}' '{image_url}'\n",
512
+ " del cap\n",
513
+ "\n",
514
+ " elif \"huggingface.co\" in url:\n",
515
+ " clean_url = strip_(url)\n",
 
 
 
 
 
 
 
516
  "\n",
517
  " \"\"\" Formatted info output \"\"\"\n",
518
  " model_name_or_basename = file_name if not 'huggingface' in url else basename\n",
519
  " format_output(clean_url or url, dst_dir, model_name_or_basename)\n",
520
  "\n",
521
+ " # ## -- for my tests --\n",
522
+ " # print(url, dst_dir, model_name_or_basename)\n",
523
+ " print(f\"\\033[31m[Data Info]:\\033[0m Failed to retrieve data from the API.\\n\") if 'civitai' in url and not data else None\n",
524
+ " if 'civitai' in url and data and image_name:\n",
525
+ " print(f\"\\033[32m[Preview DL]:\\033[0m {image_name} - {image_url}\\n\")\n",
526
  " # =====================\n",
527
  "\n",
528
+ " # # -- Git Hub --\n",
529
+ " if 'github.com' in url or 'githubusercontent.com' in url:\n",
530
+ " !aria2c {aria2_args} -d {dst_dir} -o '{basename}' '{url}'\n",
531
+ "\n",
532
  " # -- GDrive --\n",
533
+ " elif 'drive.google' in url:\n",
534
  " try:\n",
535
  " have_drive_link\n",
536
  " except:\n",
 
547
  "\n",
548
  " # -- Hugging Face --\n",
549
  " elif 'huggingface' in url:\n",
550
+ " !aria2c {header_option} {aria2_args} -d {dst_dir} -o '{basename}' '{url}'\n",
 
 
551
  "\n",
552
  " # -- Other --\n",
553
  " elif 'http' in url:\n",
554
+ " !aria2c {aria2_args} -d {dst_dir} '{'-o' + file_name if file_name else ''}' '{url}'\n",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
555
  "\n",
556
+ "''' SubModels - Added URLs '''\n",
557
  "\n",
558
  "def add_submodels(selection, num_selection, model_dict, dst_dir):\n",
559
  " if selection == \"none\":\n",
files_cells/python/en/downloading_en.py CHANGED
@@ -308,7 +308,8 @@ prefixes = {
308
  "embed": embeddings_dir,
309
  "extension": extensions_dir,
310
  "control": control_dir,
311
- "adetailer": adetailer_dir
 
312
  }
313
 
314
  extension_repo = []
@@ -337,49 +338,136 @@ def format_output(url, dst_dir, file_name):
337
  print(f"\033[33mSAVE DIR: \033[34m{dst_dir}")
338
  print(f"\033[33mFILE NAME: \033[34m{file_name}\033[0m")
339
 
340
- ''' Get Image Preview | CivitAi '''
341
-
342
- def get_data_from_api(model_id):
343
- """Fetch model data from the API"""
344
- endpoint_url = f"https://civitai.com/api/v1/model-versions/{model_id}"
345
- headers = {"Content-Type": "application/json"}
346
- try:
347
- response = requests.get(endpoint_url, headers=headers)
348
- response.raise_for_status()
349
- return response.json()
350
- except requests.exceptions.RequestException as e:
351
- print(f"An error occurred: {e}")
352
- return None
353
-
354
- def extract_model_info(data, url):
355
- """Extract model information based on URL"""
356
- if 'type=' in url:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
357
  model_type = parse_qs(urlparse(url).query).get('type', [''])[0]
358
- model_name = data['files'][1]['name']
 
 
 
359
  else:
360
  model_type = data['model']['type']
361
  model_name = data['files'][0]['name']
362
 
363
- # Finding a safe image: less than level 4 | Kaggle
364
- if env == 'Kaggle':
365
- image_url = next((image['url'] for image in data['images'] if image['nsfwLevel'] < 4), None)
 
 
 
 
 
 
 
 
 
 
366
  else:
367
- image_url = data['images'][0]['url']
368
 
369
- return model_type, model_name, image_url
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
370
 
371
- def gen_preview_filename(model_name, image_url):
372
- """Generate a preview filename"""
373
- name = model_name.split('.')
374
- img_exts = image_url.split('.')
375
- return f"{name[0]}.preview.{img_exts[-1]}"
376
 
377
- ''' main download code '''
 
 
 
 
 
 
 
 
 
378
 
379
  def handle_manual(url):
380
  url_parts = url.split(':', 1)
381
- prefix = url_parts[0]
382
- path = url_parts[1]
383
 
384
  file_name_match = re.search(r'\[(.*?)\]', path)
385
  file_name = file_name_match.group(1) if file_name_match else None
@@ -401,40 +489,38 @@ def manual_download(url, dst_dir, file_name):
401
  basename = url.split("/")[-1] if file_name is None else file_name
402
  header_option = f"--header={user_header}"
403
 
404
- # ==== CivitAi API+ ====
405
- support_types = ('Checkpoint', 'Model', 'TextualInversion', 'LORA') # for dl preview image
406
- civitai_token = "62c0c5956b2f9defbd844d754000180b"
407
 
408
- if 'civitai' in url:
409
- url = f"{url}{'&' if '?' in url else '?'}token={civitai_token}"
410
- model_id = url.split('/')[-1].split('?')[0]
411
- clean_url = re.sub(r'[?&]token=[^&]*', '', url) # hide token
412
-
413
- data = get_data_from_api(model_id)
414
- if data:
415
- model_type, model_name, image_url = extract_model_info(data, url)
416
-
417
- if any(t in model_type for t in support_types):
418
- if model_name and image_url:
419
- image_file_name = gen_preview_filename(model_name if not file_name else file_name, image_url)
420
- with capture.capture_output() as cap:
421
- get_ipython().system("aria2c {aria2_args} -d {dst_dir} -o {image_file_name} '{image_url}'")
422
- del cap
423
- file_name = file_name or model_name
424
- else:
425
- clean_url = url
426
 
427
  """ Formatted info output """
428
  model_name_or_basename = file_name if not 'huggingface' in url else basename
429
  format_output(clean_url or url, dst_dir, model_name_or_basename)
430
 
431
- print("\033[31m[Data Info]:\033[0m Failed to retrieve data from the API.\n") if 'civitai' in url and not data else None
432
- if 'civitai' in url and data and any(t in model_type for t in support_types) and (locals().get('image_file_name') or ''):
433
- print(f"\033[32m[Preview DL]:\033[0m {image_file_name} - {image_url}\n")
 
 
434
  # =====================
435
 
 
 
 
 
436
  # -- GDrive --
437
- if 'drive.google' in url:
438
  try:
439
  have_drive_link
440
  except:
@@ -451,43 +537,13 @@ def manual_download(url, dst_dir, file_name):
451
 
452
  # -- Hugging Face --
453
  elif 'huggingface' in url:
454
- if '/blob/' in url:
455
- url = url.replace('/blob/', '/resolve/')
456
- get_ipython().system("aria2c {header_option} {aria2_args} -d {dst_dir} -o {basename} '{url}'")
457
 
458
  # -- Other --
459
  elif 'http' in url:
460
- get_ipython().system("aria2c {aria2_args} -d {dst_dir} {'-o' + file_name if file_name else ''} '{url}'")
461
-
462
- def download(url):
463
- links_and_paths = url.split(',')
464
-
465
- for link_or_path in links_and_paths:
466
- link_or_path = link_or_path.strip()
467
- if not link_or_path:
468
- continue
469
- if any(link_or_path.startswith(prefix.lower()) for prefix in prefixes):
470
- handle_manual(link_or_path)
471
- continue
472
-
473
- url, dst_dir, file_name = link_or_path.split()
474
- manual_download(url, dst_dir, file_name)
475
-
476
- unpucking_zip_files()
477
-
478
- # unpucking zip files
479
- def unpucking_zip_files():
480
- for directory in directories:
481
- for root, dirs, files in os.walk(directory):
482
- for file in files:
483
- if file.endswith(".zip"):
484
- zip_path = os.path.join(root, file)
485
- extract_path = os.path.splitext(zip_path)[0]
486
- with zipfile.ZipFile(zip_path, 'r') as zip_ref:
487
- zip_ref.extractall(extract_path)
488
- os.remove(zip_path)
489
 
490
- ''' submodels - added urls '''
491
 
492
  def add_submodels(selection, num_selection, model_dict, dst_dir):
493
  if selection == "none":
 
308
  "embed": embeddings_dir,
309
  "extension": extensions_dir,
310
  "control": control_dir,
311
+ "adetailer": adetailer_dir,
312
+ "config": webui_path
313
  }
314
 
315
  extension_repo = []
 
338
  print(f"\033[33mSAVE DIR: \033[34m{dst_dir}")
339
  print(f"\033[33mFILE NAME: \033[34m{file_name}\033[0m")
340
 
341
+ ''' GET CivitAi API - DATA '''
342
+
343
+ def strip_(url, file_name=None):
344
+ if 'github.com' in url:
345
+ if '/blob/' in url:
346
+ url = url.replace('/blob/', '/raw/')
347
+
348
+ elif "civitai.com" in url:
349
+ return CivitAi_API(url, file_name)
350
+
351
+ elif "huggingface.co" in url:
352
+ if '/blob/' in url:
353
+ url = url.replace('/blob/', '/resolve/')
354
+ if '?' in url:
355
+ url = url.split('?')[0]
356
+
357
+ return url
358
+
359
+ def CivitAi_API(url, file_name=None):
360
+ support_types = ('Checkpoint', 'Model', 'TextualInversion', 'LORA')
361
+ civitai_token = "62c0c5956b2f9defbd844d754000180b"
362
+
363
+ if '?token=' in url:
364
+ url = url.split('?token=')[0]
365
+ if '?type=' in url:
366
+ url = url.replace('?type=', f'?token={civitai_token}&type=')
367
+ else:
368
+ url = f"{url}?token={civitai_token}"
369
+
370
+ # Determine model or version id
371
+ if "civitai.com/models/" in url:
372
+ if '?modelVersionId=' in url:
373
+ version_id = url.split('?modelVersionId=')[1]
374
+ response = requests.get(f"https://civitai.com/api/v1/model-versions/{version_id}")
375
+ # print(f"end - https://civitai.com/api/v1/model-versions/{version_id}")
376
+ else:
377
+ model_id = url.split('/models/')[1].split('/')[0]
378
+ response = requests.get(f"https://civitai.com/api/v1/models/{model_id}")
379
+ # print(f"end - https://civitai.com/api/v1/models/{model_id}")
380
+ else:
381
+ version_id = url.split('/models/')[1].split('/')[0]
382
+ response = requests.get(f"https://civitai.com/api/v1/model-versions/{version_id}")
383
+ # print(f"end - https://civitai.com/api/v1/model-versions/{version_id}")
384
+
385
+ data = response.json()
386
+
387
+ if response.status_code != 200:
388
+ return None, None, None, None, None, None, None
389
+
390
+ # Define model type and name
391
+ if "civitai.com/models/" in url:
392
+ if '?modelVersionId=' in url:
393
+ model_type = data['model']['type']
394
+ model_name = data['files'][0]['name']
395
+ else:
396
+ model_type = data['type']
397
+ model_name = data['modelVersions'][0]['files'][0]['name']
398
+ elif 'type=' in url:
399
  model_type = parse_qs(urlparse(url).query).get('type', [''])[0]
400
+ if 'model' in model_type.lower():
401
+ model_name = data['files'][0]['name']
402
+ else:
403
+ model_name = data['files'][1]['name']
404
  else:
405
  model_type = data['model']['type']
406
  model_name = data['files'][0]['name']
407
 
408
+ model_name = file_name or model_name
409
+
410
+ # Determine DownloadUrl
411
+ if "civitai.com/models/" in url:
412
+ if '?modelVersionId=' in url:
413
+ download_url = data.get('downloadUrl')
414
+ else:
415
+ download_url = data["modelVersions"][0].get("downloadUrl", "")
416
+ elif 'type=' in url:
417
+ if any(t.lower() in model_type.lower() for t in support_types):
418
+ download_url = data['files'][0]['downloadUrl']
419
+ else:
420
+ download_url = data['files'][1]['downloadUrl']
421
  else:
422
+ download_url = data.get('downloadUrl')
423
 
424
+ clean_url = re.sub(r'[?&]token=[^&]*', '', download_url) # hide token
425
+
426
+ # Find a safe image: level less than 4 | Kaggle
427
+ image_url, image_name = None, None
428
+ if any(t in model_type for t in support_types):
429
+ try:
430
+ images = data.get('images') or data['modelVersions'][0].get('images', [])
431
+ if env == 'Kaggle':
432
+ image_url = next((image['url'] for image in images if image['nsfwLevel'] < 4), None)
433
+ else:
434
+ image_url = images[0]['url'] if images else None
435
+ except KeyError:
436
+ pass
437
+
438
+ # Generate a name to save the image
439
+ image_name = f"{model_name.split('.')[0]}.preview.{image_url.split('.')[-1]}" if image_url else None
440
+
441
+ return f"{download_url}{'&' if '?' in download_url else '?'}token={civitai_token}", clean_url, model_type, model_name, image_url, image_name, data
442
+
443
+ ''' Main Download Code '''
444
+
445
+ def download(url):
446
+ links_and_paths = [link_or_path.strip() for link_or_path in url.split(',') if link_or_path.strip()]
447
+
448
+ for link_or_path in links_and_paths:
449
+ if any(link_or_path.lower().startswith(prefix) for prefix in prefixes):
450
+ handle_manual(link_or_path)
451
+ else:
452
+ url, dst_dir, file_name = link_or_path.split()
453
+ manual_download(url, dst_dir, file_name)
454
 
455
+ unpack_zip_files()
 
 
 
 
456
 
457
+ def unpack_zip_files():
458
+ for directory in directories:
459
+ for root, _, files in os.walk(directory):
460
+ for file in files:
461
+ if file.endswith(".zip"):
462
+ zip_path = os.path.join(root, file)
463
+ extract_path = os.path.splitext(zip_path)[0]
464
+ with zipfile.ZipFile(zip_path, 'r') as zip_ref:
465
+ zip_ref.extractall(extract_path)
466
+ os.remove(zip_path)
467
 
468
  def handle_manual(url):
469
  url_parts = url.split(':', 1)
470
+ prefix, path = url_parts[0], url_parts[1]
 
471
 
472
  file_name_match = re.search(r'\[(.*?)\]', path)
473
  file_name = file_name_match.group(1) if file_name_match else None
 
489
  basename = url.split("/")[-1] if file_name is None else file_name
490
  header_option = f"--header={user_header}"
491
 
492
+ if 'github.com' in url:
493
+ url = strip_(url)
 
494
 
495
+ # -- CivitAi APi+ V2 --
496
+ elif 'civitai' in url:
497
+ url, clean_url, model_type, file_name, image_url, image_name, data = strip_(url, file_name)
498
+
499
+ if image_url and image_name:
500
+ with capture.capture_output() as cap:
501
+ get_ipython().system("aria2c {aria2_args} -d {dst_dir} -o '{image_name}' '{image_url}'")
502
+ del cap
503
+
504
+ elif "huggingface.co" in url:
505
+ clean_url = strip_(url)
 
 
 
 
 
 
 
506
 
507
  """ Formatted info output """
508
  model_name_or_basename = file_name if not 'huggingface' in url else basename
509
  format_output(clean_url or url, dst_dir, model_name_or_basename)
510
 
511
+ # ## -- for my tests --
512
+ # print(url, dst_dir, model_name_or_basename)
513
+ print(f"\033[31m[Data Info]:\033[0m Failed to retrieve data from the API.\n") if 'civitai' in url and not data else None
514
+ if 'civitai' in url and data and image_name:
515
+ print(f"\033[32m[Preview DL]:\033[0m {image_name} - {image_url}\n")
516
  # =====================
517
 
518
+ # # -- Git Hub --
519
+ if 'github.com' in url or 'githubusercontent.com' in url:
520
+ get_ipython().system("aria2c {aria2_args} -d {dst_dir} -o '{basename}' '{url}'")
521
+
522
  # -- GDrive --
523
+ elif 'drive.google' in url:
524
  try:
525
  have_drive_link
526
  except:
 
537
 
538
  # -- Hugging Face --
539
  elif 'huggingface' in url:
540
+ get_ipython().system("aria2c {header_option} {aria2_args} -d {dst_dir} -o '{basename}' '{url}'")
 
 
541
 
542
  # -- Other --
543
  elif 'http' in url:
544
+ get_ipython().system("aria2c {aria2_args} -d {dst_dir} '{'-o' + file_name if file_name else ''}' '{url}'")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
545
 
546
+ ''' SubModels - Added URLs '''
547
 
548
  def add_submodels(selection, num_selection, model_dict, dst_dir):
549
  if selection == "none":
files_cells/python/ru/downloading_ru.py CHANGED
@@ -308,7 +308,8 @@ prefixes = {
308
  "embed": embeddings_dir,
309
  "extension": extensions_dir,
310
  "control": control_dir,
311
- "adetailer": adetailer_dir
 
312
  }
313
 
314
  extension_repo = []
@@ -337,49 +338,136 @@ def format_output(url, dst_dir, file_name):
337
  print(f"\033[33mSAVE DIR: \033[34m{dst_dir}")
338
  print(f"\033[33mFILE NAME: \033[34m{file_name}\033[0m")
339
 
340
- ''' Get Image Preview | CivitAi '''
341
-
342
- def get_data_from_api(model_id):
343
- """Fetch model data from the API"""
344
- endpoint_url = f"https://civitai.com/api/v1/model-versions/{model_id}"
345
- headers = {"Content-Type": "application/json"}
346
- try:
347
- response = requests.get(endpoint_url, headers=headers)
348
- response.raise_for_status()
349
- return response.json()
350
- except requests.exceptions.RequestException as e:
351
- print(f"An error occurred: {e}")
352
- return None
353
-
354
- def extract_model_info(data, url):
355
- """Extract model information based on URL"""
356
- if 'type=' in url:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
357
  model_type = parse_qs(urlparse(url).query).get('type', [''])[0]
358
- model_name = data['files'][1]['name']
 
 
 
359
  else:
360
  model_type = data['model']['type']
361
  model_name = data['files'][0]['name']
362
 
363
- # Finding a safe image: less than level 4 | Kaggle
364
- if env == 'Kaggle':
365
- image_url = next((image['url'] for image in data['images'] if image['nsfwLevel'] < 4), None)
 
 
 
 
 
 
 
 
 
 
366
  else:
367
- image_url = data['images'][0]['url']
368
 
369
- return model_type, model_name, image_url
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
370
 
371
- def gen_preview_filename(model_name, image_url):
372
- """Generate a preview filename"""
373
- name = model_name.split('.')
374
- img_exts = image_url.split('.')
375
- return f"{name[0]}.preview.{img_exts[-1]}"
376
 
377
- ''' main download code '''
 
 
 
 
 
 
 
 
 
378
 
379
  def handle_manual(url):
380
  url_parts = url.split(':', 1)
381
- prefix = url_parts[0]
382
- path = url_parts[1]
383
 
384
  file_name_match = re.search(r'\[(.*?)\]', path)
385
  file_name = file_name_match.group(1) if file_name_match else None
@@ -401,40 +489,38 @@ def manual_download(url, dst_dir, file_name):
401
  basename = url.split("/")[-1] if file_name is None else file_name
402
  header_option = f"--header={user_header}"
403
 
404
- # ==== CivitAi API+ ====
405
- support_types = ('Checkpoint', 'Model', 'TextualInversion', 'LORA') # for dl preview image
406
- civitai_token = "62c0c5956b2f9defbd844d754000180b"
407
 
408
- if 'civitai' in url:
409
- url = f"{url}{'&' if '?' in url else '?'}token={civitai_token}"
410
- model_id = url.split('/')[-1].split('?')[0]
411
- clean_url = re.sub(r'[?&]token=[^&]*', '', url) # hide token
412
-
413
- data = get_data_from_api(model_id)
414
- if data:
415
- model_type, model_name, image_url = extract_model_info(data, url)
416
-
417
- if any(t in model_type for t in support_types):
418
- if model_name and image_url:
419
- image_file_name = gen_preview_filename(model_name if not file_name else file_name, image_url)
420
- with capture.capture_output() as cap:
421
- get_ipython().system("aria2c {aria2_args} -d {dst_dir} -o {image_file_name} '{image_url}'")
422
- del cap
423
- file_name = file_name or model_name
424
- else:
425
- clean_url = url
426
 
427
  """ Formatted info output """
428
  model_name_or_basename = file_name if not 'huggingface' in url else basename
429
  format_output(clean_url or url, dst_dir, model_name_or_basename)
430
 
431
- print("\033[31m[Data Info]:\033[0m Failed to retrieve data from the API.\n") if 'civitai' in url and not data else None
432
- if 'civitai' in url and data and any(t in model_type for t in support_types) and (locals().get('image_file_name') or ''):
433
- print(f"\033[32m[Preview DL]:\033[0m {image_file_name} - {image_url}\n")
 
 
434
  # =====================
435
 
 
 
 
 
436
  # -- GDrive --
437
- if 'drive.google' in url:
438
  try:
439
  have_drive_link
440
  except:
@@ -451,43 +537,13 @@ def manual_download(url, dst_dir, file_name):
451
 
452
  # -- Hugging Face --
453
  elif 'huggingface' in url:
454
- if '/blob/' in url:
455
- url = url.replace('/blob/', '/resolve/')
456
- get_ipython().system("aria2c {header_option} {aria2_args} -d {dst_dir} -o {basename} '{url}'")
457
 
458
  # -- Other --
459
  elif 'http' in url:
460
- get_ipython().system("aria2c {aria2_args} -d {dst_dir} {'-o' + file_name if file_name else ''} '{url}'")
461
-
462
- def download(url):
463
- links_and_paths = url.split(',')
464
-
465
- for link_or_path in links_and_paths:
466
- link_or_path = link_or_path.strip()
467
- if not link_or_path:
468
- continue
469
- if any(link_or_path.startswith(prefix.lower()) for prefix in prefixes):
470
- handle_manual(link_or_path)
471
- continue
472
-
473
- url, dst_dir, file_name = link_or_path.split()
474
- manual_download(url, dst_dir, file_name)
475
-
476
- unpucking_zip_files()
477
-
478
- # unpucking zip files
479
- def unpucking_zip_files():
480
- for directory in directories:
481
- for root, dirs, files in os.walk(directory):
482
- for file in files:
483
- if file.endswith(".zip"):
484
- zip_path = os.path.join(root, file)
485
- extract_path = os.path.splitext(zip_path)[0]
486
- with zipfile.ZipFile(zip_path, 'r') as zip_ref:
487
- zip_ref.extractall(extract_path)
488
- os.remove(zip_path)
489
 
490
- ''' submodels - added urls '''
491
 
492
  def add_submodels(selection, num_selection, model_dict, dst_dir):
493
  if selection == "none":
 
308
  "embed": embeddings_dir,
309
  "extension": extensions_dir,
310
  "control": control_dir,
311
+ "adetailer": adetailer_dir,
312
+ "config": webui_path
313
  }
314
 
315
  extension_repo = []
 
338
  print(f"\033[33mSAVE DIR: \033[34m{dst_dir}")
339
  print(f"\033[33mFILE NAME: \033[34m{file_name}\033[0m")
340
 
341
+ ''' GET CivitAi API - DATA '''
342
+
343
+ def strip_(url, file_name=None):
344
+ if 'github.com' in url:
345
+ if '/blob/' in url:
346
+ url = url.replace('/blob/', '/raw/')
347
+
348
+ elif "civitai.com" in url:
349
+ return CivitAi_API(url, file_name)
350
+
351
+ elif "huggingface.co" in url:
352
+ if '/blob/' in url:
353
+ url = url.replace('/blob/', '/resolve/')
354
+ if '?' in url:
355
+ url = url.split('?')[0]
356
+
357
+ return url
358
+
359
+ def CivitAi_API(url, file_name=None):
360
+ support_types = ('Checkpoint', 'Model', 'TextualInversion', 'LORA')
361
+ civitai_token = "62c0c5956b2f9defbd844d754000180b"
362
+
363
+ if '?token=' in url:
364
+ url = url.split('?token=')[0]
365
+ if '?type=' in url:
366
+ url = url.replace('?type=', f'?token={civitai_token}&type=')
367
+ else:
368
+ url = f"{url}?token={civitai_token}"
369
+
370
+ # Determine model or version id
371
+ if "civitai.com/models/" in url:
372
+ if '?modelVersionId=' in url:
373
+ version_id = url.split('?modelVersionId=')[1]
374
+ response = requests.get(f"https://civitai.com/api/v1/model-versions/{version_id}")
375
+ # print(f"end - https://civitai.com/api/v1/model-versions/{version_id}")
376
+ else:
377
+ model_id = url.split('/models/')[1].split('/')[0]
378
+ response = requests.get(f"https://civitai.com/api/v1/models/{model_id}")
379
+ # print(f"end - https://civitai.com/api/v1/models/{model_id}")
380
+ else:
381
+ version_id = url.split('/models/')[1].split('/')[0]
382
+ response = requests.get(f"https://civitai.com/api/v1/model-versions/{version_id}")
383
+ # print(f"end - https://civitai.com/api/v1/model-versions/{version_id}")
384
+
385
+ data = response.json()
386
+
387
+ if response.status_code != 200:
388
+ return None, None, None, None, None, None, None
389
+
390
+ # Define model type and name
391
+ if "civitai.com/models/" in url:
392
+ if '?modelVersionId=' in url:
393
+ model_type = data['model']['type']
394
+ model_name = data['files'][0]['name']
395
+ else:
396
+ model_type = data['type']
397
+ model_name = data['modelVersions'][0]['files'][0]['name']
398
+ elif 'type=' in url:
399
  model_type = parse_qs(urlparse(url).query).get('type', [''])[0]
400
+ if 'model' in model_type.lower():
401
+ model_name = data['files'][0]['name']
402
+ else:
403
+ model_name = data['files'][1]['name']
404
  else:
405
  model_type = data['model']['type']
406
  model_name = data['files'][0]['name']
407
 
408
+ model_name = file_name or model_name
409
+
410
+ # Determine DownloadUrl
411
+ if "civitai.com/models/" in url:
412
+ if '?modelVersionId=' in url:
413
+ download_url = data.get('downloadUrl')
414
+ else:
415
+ download_url = data["modelVersions"][0].get("downloadUrl", "")
416
+ elif 'type=' in url:
417
+ if any(t.lower() in model_type.lower() for t in support_types):
418
+ download_url = data['files'][0]['downloadUrl']
419
+ else:
420
+ download_url = data['files'][1]['downloadUrl']
421
  else:
422
+ download_url = data.get('downloadUrl')
423
 
424
+ clean_url = re.sub(r'[?&]token=[^&]*', '', download_url) # hide token
425
+
426
+ # Find a safe image: level less than 4 | Kaggle
427
+ image_url, image_name = None, None
428
+ if any(t in model_type for t in support_types):
429
+ try:
430
+ images = data.get('images') or data['modelVersions'][0].get('images', [])
431
+ if env == 'Kaggle':
432
+ image_url = next((image['url'] for image in images if image['nsfwLevel'] < 4), None)
433
+ else:
434
+ image_url = images[0]['url'] if images else None
435
+ except KeyError:
436
+ pass
437
+
438
+ # Generate a name to save the image
439
+ image_name = f"{model_name.split('.')[0]}.preview.{image_url.split('.')[-1]}" if image_url else None
440
+
441
+ return f"{download_url}{'&' if '?' in download_url else '?'}token={civitai_token}", clean_url, model_type, model_name, image_url, image_name, data
442
+
443
+ ''' Main Download Code '''
444
+
445
+ def download(url):
446
+ links_and_paths = [link_or_path.strip() for link_or_path in url.split(',') if link_or_path.strip()]
447
+
448
+ for link_or_path in links_and_paths:
449
+ if any(link_or_path.lower().startswith(prefix) for prefix in prefixes):
450
+ handle_manual(link_or_path)
451
+ else:
452
+ url, dst_dir, file_name = link_or_path.split()
453
+ manual_download(url, dst_dir, file_name)
454
 
455
+ unpack_zip_files()
 
 
 
 
456
 
457
+ def unpack_zip_files():
458
+ for directory in directories:
459
+ for root, _, files in os.walk(directory):
460
+ for file in files:
461
+ if file.endswith(".zip"):
462
+ zip_path = os.path.join(root, file)
463
+ extract_path = os.path.splitext(zip_path)[0]
464
+ with zipfile.ZipFile(zip_path, 'r') as zip_ref:
465
+ zip_ref.extractall(extract_path)
466
+ os.remove(zip_path)
467
 
468
  def handle_manual(url):
469
  url_parts = url.split(':', 1)
470
+ prefix, path = url_parts[0], url_parts[1]
 
471
 
472
  file_name_match = re.search(r'\[(.*?)\]', path)
473
  file_name = file_name_match.group(1) if file_name_match else None
 
489
  basename = url.split("/")[-1] if file_name is None else file_name
490
  header_option = f"--header={user_header}"
491
 
492
+ if 'github.com' in url:
493
+ url = strip_(url)
 
494
 
495
+ # -- CivitAi APi+ V2 --
496
+ elif 'civitai' in url:
497
+ url, clean_url, model_type, file_name, image_url, image_name, data = strip_(url, file_name)
498
+
499
+ if image_url and image_name:
500
+ with capture.capture_output() as cap:
501
+ get_ipython().system("aria2c {aria2_args} -d {dst_dir} -o '{image_name}' '{image_url}'")
502
+ del cap
503
+
504
+ elif "huggingface.co" in url:
505
+ clean_url = strip_(url)
 
 
 
 
 
 
 
506
 
507
  """ Formatted info output """
508
  model_name_or_basename = file_name if not 'huggingface' in url else basename
509
  format_output(clean_url or url, dst_dir, model_name_or_basename)
510
 
511
+ # ## -- for my tests --
512
+ # print(url, dst_dir, model_name_or_basename)
513
+ print(f"\033[31m[Data Info]:\033[0m Failed to retrieve data from the API.\n") if 'civitai' in url and not data else None
514
+ if 'civitai' in url and data and image_name:
515
+ print(f"\033[32m[Preview DL]:\033[0m {image_name} - {image_url}\n")
516
  # =====================
517
 
518
+ # # -- Git Hub --
519
+ if 'github.com' in url or 'githubusercontent.com' in url:
520
+ get_ipython().system("aria2c {aria2_args} -d {dst_dir} -o '{basename}' '{url}'")
521
+
522
  # -- GDrive --
523
+ elif 'drive.google' in url:
524
  try:
525
  have_drive_link
526
  except:
 
537
 
538
  # -- Hugging Face --
539
  elif 'huggingface' in url:
540
+ get_ipython().system("aria2c {header_option} {aria2_args} -d {dst_dir} -o '{basename}' '{url}'")
 
 
541
 
542
  # -- Other --
543
  elif 'http' in url:
544
+ get_ipython().system("aria2c {aria2_args} -d {dst_dir} '{'-o' + file_name if file_name else ''}' '{url}'")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
545
 
546
+ ''' SubModels - Added URLs '''
547
 
548
  def add_submodels(selection, num_selection, model_dict, dst_dir):
549
  if selection == "none":
special/dl_display_results.py CHANGED
@@ -194,7 +194,7 @@ header_widget = widgets.HTML(value=f'''
194
  ''')
195
 
196
  # Models
197
- models_list = get_files_list(models_dir, '.safetensors')
198
  models_widget = output_container_generator('Models', models_list)
199
  # Vaes
200
  vaes_list = get_files_list(vaes_dir, '.safetensors')
 
194
  ''')
195
 
196
  # Models
197
+ models_list = get_files_list(models_dir, ('.safetensors', '.ckpt'))
198
  models_widget = output_container_generator('Models', models_list)
199
  # Vaes
200
  vaes_list = get_files_list(vaes_dir, '.safetensors')