bhuvanmdev committed on
Commit
93ca783
1 Parent(s): 2b3fceb

Training in progress, step 760, checkpoint

Browse files
last-checkpoint/adapter_config.json CHANGED
@@ -21,9 +21,9 @@
21
  "revision": null,
22
  "target_modules": [
23
  "qkv_proj",
24
- "gate_up_proj",
25
  "down_proj",
26
- "o_proj"
 
27
  ],
28
  "task_type": "CAUSAL_LM",
29
  "use_dora": false,
 
21
  "revision": null,
22
  "target_modules": [
23
  "qkv_proj",
 
24
  "down_proj",
25
+ "o_proj",
26
+ "gate_up_proj"
27
  ],
28
  "task_type": "CAUSAL_LM",
29
  "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:745ed45ba46e27d4a276a4bd62a985fbd8934270c510745c037ea7283b997c5a
3
  size 100697728
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a21005b0c25f84187a6d837e3d90bc53f433f50e714145c05d0b17b4d6481985
3
  size 100697728
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3666732966e75b7dddea0f14706f3ce6fbe99dbf7fd649207c5ed62a6184fa42
3
  size 201541754
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f8e8c28b45c3d4944833b51532fd8aea3a328fbd53d387d4481409ba0bd1c56
3
  size 201541754
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:8db1fe9840930b9fd7409bb742ad453c84b28c1800a3abc1b4cce223762b144f
3
  size 14244
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:744f87a373b7e8aca2b7ead73d8f8e4c8c1a6ad775f7cdeecb51f2b91e13de93
3
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f129e6dbcdc516e08a9aab8d7cc9efeb682288e6509845b97627189c47790bc6
3
  size 1064
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4433212656c2840008788d3ef4721ac521814715c0310491af53892cc2f91958
3
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 0.26475849731663686,
5
  "eval_steps": 500,
6
- "global_step": 740,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
@@ -599,14 +599,30 @@
599
  "loss": 0.4178,
600
  "num_input_tokens_seen": 494747,
601
  "step": 740
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
602
  }
603
  ],
604
  "logging_steps": 10,
605
  "max_steps": 2795,
606
- "num_input_tokens_seen": 494747,
607
  "num_train_epochs": 1,
608
  "save_steps": 20,
609
- "total_flos": 1.1125112884402176e+16,
610
  "train_batch_size": 1,
611
  "trial_name": null,
612
  "trial_params": null
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 0.27191413237924866,
5
  "eval_steps": 500,
6
+ "global_step": 760,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
 
599
  "loss": 0.4178,
600
  "num_input_tokens_seen": 494747,
601
  "step": 740
602
+ },
603
+ {
604
+ "epoch": 0.26833631484794274,
605
+ "grad_norm": 0.29682761430740356,
606
+ "learning_rate": 0.00014633273703041147,
607
+ "loss": 0.4309,
608
+ "num_input_tokens_seen": 501076,
609
+ "step": 750
610
+ },
611
+ {
612
+ "epoch": 0.27191413237924866,
613
+ "grad_norm": 0.44356122612953186,
614
+ "learning_rate": 0.00014561717352415027,
615
+ "loss": 0.4113,
616
+ "num_input_tokens_seen": 508236,
617
+ "step": 760
618
  }
619
  ],
620
  "logging_steps": 10,
621
  "max_steps": 2795,
622
+ "num_input_tokens_seen": 508236,
623
  "num_train_epochs": 1,
624
  "save_steps": 20,
625
+ "total_flos": 1.1428432859455488e+16,
626
  "train_batch_size": 1,
627
  "trial_name": null,
628
  "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:e03bc6a1791d1744856a78e4eb438a0c0f57a4d80f850b6de88464095403a4fa
3
- size 4984
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8bee6a61298e4ba0636e6bf836a1f169e31db19f42b2bb480b9f85ccd38a9c1e
3
+ size 5048