TheLastBen committed
Commit 777e633
Parent(s): 9680a91

Update Scripts/mainpaperspacev1.py

Scripts/mainpaperspacev1.py  +13 -27
Scripts/mainpaperspacev1.py
CHANGED
@@ -38,7 +38,7 @@ def Deps(force_reinstall):
         print('[1;32mModules and notebooks updated, dependencies already installed')

     else:
-        print('[1;
+        print('[1;33mInstalling the dependencies...')
         call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
         if not os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
             os.chdir('/usr/local/lib/python3.9/dist-packages')
@@ -141,7 +141,7 @@ def downloadmodel_pth(CKPT_Path):

     else:
         while not os.path.exists(str(CKPT_Path)):
-            print('[1;31mWrong path, use the
+            print('[1;31mWrong path, use the file explorer to copy the path')
             time.sleep(5)


@@ -205,7 +205,7 @@ def sess(Session_Name, Session_Link_optional, MODEL_NAME):
     WORKSPACE='/notebooks/Fast-Dreambooth'

     if Session_Link_optional !="":
-        print('[1;
+        print('[1;33mDownloading session...')

     if Session_Link_optional != "":
         if not os.path.exists(str(WORKSPACE+'/Sessions')):
@@ -343,11 +343,6 @@ def uplder(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDE

 def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren):

-
-    if os.path.exists(CAPTIONS_DIR+"off"):
-        call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
-        time.sleep(2)
-
     if Remove_existing_instance_images:
         if os.path.exists(str(INSTANCE_DIR)):
             call("rm -r " +INSTANCE_DIR, shell=True)
@@ -451,11 +446,7 @@ def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_


 def caption(CAPTIONS_DIR, INSTANCE_DIR):
-
-    if os.path.exists(CAPTIONS_DIR+"off"):
-        call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
-        time.sleep(2)
-
+
     paths=""
     out=""
     widgets_l=""
@@ -536,10 +527,6 @@ def dbtrain(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encod
         print('[1;31mNo model found, use the "Model Download" cell to download a model.')
         time.sleep(5)

-    if os.path.exists(CAPTIONS_DIR+"off"):
-        call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
-        time.sleep(2)
-
     MODELT_NAME=MODEL_NAME

     Seed=random.randint(1, 999999)
@@ -605,6 +592,7 @@ def dbtrain(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encod
     def dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps):
         call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
         '+trnonltxt+' \
+        '+extrnlcptn+' \
         --train_text_encoder \
         --image_captions_filename \
         --dump_only_text_encoder \
@@ -619,14 +607,14 @@ def dbtrain(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encod
         --gradient_accumulation_steps=1 --gradient_checkpointing \
         --use_8bit_adam \
         --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
-        --lr_scheduler="
+        --lr_scheduler="linear" \
         --lr_warmup_steps=0 \
         --max_train_steps='+str(Training_Steps), shell=True)

     def train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, Style, extrnlcptn, precision, Training_Steps):
         clear_output()
         if resuming=="Yes":
-            print('[1;32mResuming Training...[0m')
+            print('[1;32mResuming Training...[0m')
         print('[1;33mTraining the UNet...[0m')
         call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
         '+Style+' \
@@ -648,7 +636,7 @@ def dbtrain(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encod
         --gradient_accumulation_steps=1 '+GCUNET+' \
         --use_8bit_adam \
         --learning_rate='+str(UNet_Learning_Rate)+' \
-        --lr_scheduler="
+        --lr_scheduler="linear" \
         --lr_warmup_steps=0 \
         --max_train_steps='+str(Training_Steps), shell=True)

@@ -684,7 +672,7 @@ def dbtrain(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encod
     if UNet_Training_Steps!=0:
         train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, Style, extrnlcptn, precision, Training_Steps=UNet_Training_Steps)

-    if UNet_Training_Steps==0 and Text_Encoder_Concept_Training_Steps==0 and
+    if UNet_Training_Steps==0 and Text_Encoder_Concept_Training_Steps==0 and Text_Encoder_Training_Steps==0 :
         print('[1;32mNothing to do')
     else:
         if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
@@ -798,7 +786,7 @@ def test(Custom_Path, Previous_Session_Name, Session_Name, User, Password, Use_l
     os.chdir('/notebooks/sd/stable-diffusion-webui')
     clear_output()

-    configf="--disable-console-progressbars --no-half-vae --disable-safe-unpickle --api --xformers --medvram --skip-version-check --ckpt "+path_to_trained_model+" "+auth+" "+share
+    configf="--disable-console-progressbars --no-half-vae --disable-safe-unpickle --api --xformers --enable-insecure-extension-access --medvram --skip-version-check --ckpt "+path_to_trained_model+" "+auth+" "+share

     return configf

@@ -885,7 +873,7 @@ def hf(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, OUT
         br="[1;33mUploading to HuggingFace : " '[0m|'+'█' * prg + ' ' * (25-prg)+'| ' +str(prg*4)+ "%"
         return br

-    print("[1;
+    print("[1;33mLoading...")


     os.chdir(OUTPUT_DIR)
@@ -893,7 +881,7 @@ def hf(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, OUT
     call('rm model_index.json', shell=True)
     call('git init', shell=True)
     call('git lfs install --system --skip-repo', shell=True)
-    call('git remote add -f origin
+    call('git remote add -f origin https://huggingface.co/runwayml/stable-diffusion-v1-5', shell=True)
     call('git config core.sparsecheckout true', shell=True)
     call('echo -e "\nfeature_extractor\nsafety_checker\nmodel_index.json" > .git/info/sparse-checkout', shell=True)
     call('git pull origin main', shell=True)
@@ -909,10 +897,8 @@ def hf(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, OUT
 - text-to-image
 - stable-diffusion
 ---
-### {Name_of_your_concept} Dreambooth model trained by {api.whoami(token=hf_token)["name"]} with
+### {Name_of_your_concept} Dreambooth model trained by {api.whoami(token=hf_token)["name"]} with TheLastBen's fast-DreamBooth notebook

-Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb)
-Or you can run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb)
 '''
     #Save the readme to a file
     readme_file = open("README.md", "w")
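
Note (not part of the commit): both accelerate launch calls now pass --lr_scheduler="linear" together with --lr_warmup_steps=0 and --max_train_steps. A minimal sketch of what those flags amount to inside a diffusers training script, assuming the stock get_scheduler helper; the parameter, learning rate and step count below are illustrative, not taken from the script:

import torch
from diffusers.optimization import get_scheduler

params = [torch.nn.Parameter(torch.zeros(1))]   # stand-in for UNet / text-encoder weights
optimizer = torch.optim.AdamW(params, lr=2e-6)  # value passed via --learning_rate

lr_scheduler = get_scheduler(
    "linear",                 # --lr_scheduler="linear"
    optimizer=optimizer,
    num_warmup_steps=0,       # --lr_warmup_steps=0
    num_training_steps=1500,  # --max_train_steps
)

for _ in range(1500):
    optimizer.step()
    lr_scheduler.step()

# With no warmup, the LR decays linearly from 2e-6 at step 0 to ~0 at the last step.
print(lr_scheduler.get_last_lr())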
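
Note (not part of the commit): the hf() upload step now points the git sparse checkout at https://huggingface.co/runwayml/stable-diffusion-v1-5 to pull only feature_extractor, safety_checker and model_index.json. An alternative sketch using huggingface_hub instead of git-lfs; illustrative only, the script itself uses the git commands shown in the diff:

# Alternative sketch (not what the script does): fetch the same three items
# with huggingface_hub's snapshot_download instead of a git sparse checkout.
from huggingface_hub import snapshot_download

local_path = snapshot_download(
    "runwayml/stable-diffusion-v1-5",
    allow_patterns=["feature_extractor/*", "safety_checker/*", "model_index.json"],
)
print(local_path)  # cached snapshot containing only the requested files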
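
Note (not part of the commit): test() now adds --enable-insecure-extension-access to the flag string it returns. How that string is consumed is not shown in this diff; a hypothetical sketch of the kind of launch cell that would use it, where the launch line and the checkpoint path are assumptions and the flags are copied from the function:

# Hypothetical usage sketch: configf stands in for the string returned by test();
# the launch.py invocation and the checkpoint path are assumed, not shown in this commit.
from subprocess import call

configf = ("--disable-console-progressbars --no-half-vae --disable-safe-unpickle "
           "--api --xformers --enable-insecure-extension-access --medvram "
           "--skip-version-check --ckpt /notebooks/model.ckpt")  # illustrative path

call('python /notebooks/sd/stable-diffusion-webui/launch.py ' + configf, shell=True)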