VikramSingh178 committed
Commit: d2776c8
Parent: 6b352dc

Refactor .gitignore, config.py, and wandb scripts, and update requirements.txt

.gitignore CHANGED
@@ -1 +1,2 @@
 .venv
+scripts/wandb
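The new scripts/wandb entry keeps Weights & Biases run artifacts out of version control: by default, wandb.init() creates a wandb/ directory in the working directory (here scripts/) and fills it with per-run logs and metadata. A minimal illustration; the project name below is a placeholder, not taken from this repo:

import wandb

# Launched from scripts/, this creates scripts/wandb/run-<timestamp>-<id>/
# holding logs, config, and metadata for the run; none of it belongs in git.
run = wandb.init(project="sdxl-lora")  # hypothetical project name
run.finish()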
scripts/__pycache__/config.cpython-310.pyc CHANGED
Binary files a/scripts/__pycache__/config.cpython-310.pyc and b/scripts/__pycache__/config.cpython-310.pyc differ
 
scripts/config.py CHANGED
@@ -17,16 +17,16 @@ class Config:
         self.validation_prompt = None
         self.num_validation_images = 4
         self.validation_epochs = 1
-        self.max_train_samples = None
+        self.max_train_samples = 7
         self.output_dir = "output"
         self.cache_dir = None
-        self.seed = None
-        self.resolution = 1024
-        self.center_crop = False
-        self.random_flip = False
+        self.seed = 42
+        self.resolution = 512
+        self.center_crop = True
+        self.random_flip = True
         self.train_text_encoder = False
-        self.train_batch_size = 16
-        self.num_train_epochs = 200
+        self.train_batch_size = 64
+        self.num_train_epochs = 400
         self.max_train_steps = None
         self.checkpointing_steps = 500
         self.checkpoints_total_limit = None
@@ -38,7 +38,7 @@ class Config:
         self.lr_scheduler = "constant"
         self.lr_warmup_steps = 500
         self.snr_gamma = None
-        self.allow_tf32 = False
+        self.allow_tf32 = True
         self.dataloader_num_workers = 0
         self.use_8bit_adam = True
         self.adam_beta1 = 0.9
@@ -46,13 +46,13 @@ class Config:
         self.adam_weight_decay = 1e-2
         self.adam_epsilon = 1e-08
         self.max_grad_norm = 1.0
-        self.push_to_hub = False
+        self.push_to_hub = True
         self.hub_token = None
         self.prediction_type = None
         self.hub_model_id = None
         self.logging_dir = "logs"
         self.report_to = "wandb"
-        self.mixed_precision = None
+        self.mixed_precision = 'fp16'
         self.local_rank = -1
         self.enable_xformers_memory_efficient_attention = False
         self.noise_offset = 0
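The new values read like a single-GPU LoRA fine-tuning setup: a fixed seed and a 7-sample subset for quick iteration, 512px inputs with crop/flip augmentation, and TF32, fp16 mixed precision, and 8-bit Adam to cut compute and memory cost. A sketch of how a training script typically consumes these flags; the wiring below is illustrative (assuming Config is importable from scripts/config.py), not copied from this repo's trainer:

import torch

from config import Config

config = Config()

if config.allow_tf32:
    # TF32 trades a little matmul precision for large speedups on Ampere+ GPUs
    torch.backends.cuda.matmul.allow_tf32 = True

# Stand-in for the LoRA parameters actually being trained
model = torch.nn.Linear(4, 4)

if config.use_8bit_adam:
    import bitsandbytes as bnb  # 8-bit optimizer states shrink optimizer memory ~4x
    optimizer_cls = bnb.optim.AdamW8bit
else:
    optimizer_cls = torch.optim.AdamW

optimizer = optimizer_cls(
    model.parameters(),
    lr=1e-4,  # placeholder; the real learning rate lives elsewhere in Config
    betas=(config.adam_beta1, 0.999),  # beta2 not visible in this hunk; assumed
    weight_decay=config.adam_weight_decay,
    eps=config.adam_epsilon,
)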
scripts/output/checkpoint-1000/optimizer.bin.REMOVED.git-id ADDED
@@ -0,0 +1 @@
+74a152223565564def6ad6a76129bf59a0171b8f
scripts/output/checkpoint-1000/pytorch_lora_weights.safetensors.REMOVED.git-id ADDED
@@ -0,0 +1 @@
+dfbc6651386c693d47d1944b179470ea50e68756
scripts/output/checkpoint-1000/random_states_0.pkl ADDED
Binary file (14.4 kB)

scripts/output/checkpoint-1000/scaler.pt ADDED
Binary file (988 Bytes)

scripts/output/checkpoint-1000/scheduler.bin ADDED
Binary file (1 kB)
scripts/output/checkpoint-500/optimizer.bin.REMOVED.git-id ADDED
@@ -0,0 +1 @@
+c619da21bfd5d567bd60e03fbaf1c29d30578baf
scripts/output/checkpoint-500/pytorch_lora_weights.safetensors.REMOVED.git-id ADDED
@@ -0,0 +1 @@
+bb4f104bc6e519ebd9ff69e8f59c3001beaa0b91
scripts/output/checkpoint-500/random_states_0.pkl ADDED
Binary file (14.3 kB)

scripts/output/checkpoint-500/scaler.pt ADDED
Binary file (988 Bytes)

scripts/output/checkpoint-500/scheduler.bin ADDED
Binary file (1 kB)
 
scripts/output/pytorch_lora_weights.safetensors.REMOVED.git-id ADDED
@@ -0,0 +1 @@
+dafdc032f1a8f320ac0e888bcf5156500f5da7dc
scripts/sdxl_lora_tuner.py CHANGED
@@ -7,9 +7,13 @@ import numpy as np
 import torch
 import torch.nn.functional as F
 import transformers
-from accelerate import Accelerator
+from accelerate import Accelerator
 from accelerate.logging import get_logger
-from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
+from accelerate.utils import (
+    DistributedDataParallelKwargs,
+    ProjectConfiguration,
+    set_seed,
+)
 from datasets import load_dataset
 from huggingface_hub import create_repo, upload_folder
 from packaging import version
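Splitting the accelerate.utils import across lines is purely cosmetic, but these three names conventionally work together at the top of an Accelerate-based trainer. A minimal sketch of that setup; the argument values are illustrative assumptions, not taken from this file:

from accelerate import Accelerator
from accelerate.utils import (
    DistributedDataParallelKwargs,
    ProjectConfiguration,
    set_seed,
)

def make_accelerator(config):
    # Same seed on every process for reproducible runs
    if config.seed is not None:
        set_seed(config.seed)

    # Tells Accelerate where to write checkpoints and tracker logs
    project_config = ProjectConfiguration(
        project_dir=config.output_dir,
        logging_dir=config.logging_dir,
    )

    # Often needed when only a subset of parameters (e.g. LoRA) gets gradients
    ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)

    return Accelerator(
        mixed_precision=config.mixed_precision,  # 'fp16' per the config change above
        log_with=config.report_to,               # 'wandb'
        project_config=project_config,
        kwargs_handlers=[ddp_kwargs],
    )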