eeeebbb2 committed
Commit 800d8ef · verified · Parent: c57bdcf

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f73a1e23a417367c5a9668264f1eb9f0ae978643897a10430d165db48639a26f
+ oid sha256:c18b924e9f94faa2194c7dd165d815f5bad3562d15d7d8755923a4efc4d3c166
  size 166182480
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6fd8261692559781739203e2597ba1dd46a9fa144476d53d3f25d02117ea6f9d
+ oid sha256:a961572d93f460389a432fa1bc4312972bbda4259583020c0e5dc03bc63f0af8
  size 332574358
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:09f680f1df5166c32096111cd9904604cb506551c7aa7236fc44053f1327df18
+ oid sha256:f93a1d0de4f7c19691454ad529d0353516a3e1eb23d9afad1c3d52995855276f
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:55d3e7c1ed0c97284af244303bca6a5d088d8cb3b01d30724439d8b3ccafefb8
+ oid sha256:bcb75c6922499baeeed67a00e449f5c50cc9e2e1d8c3c20af2cdb8e08ee582bb
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e1b0f4f26c760c2d49353619e173b0a05b570d23be5bc83f03b5b6f0e3ceb5b2
+ oid sha256:8e6d2520db646d11fc16ab1481a74170d3f4375da45df57d7d1ae1e4f9c79f3b
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a8ef00efea6ec0eb927725c06f8b8e9493d8000e50113a39e9fe9bb49143fde6
+ oid sha256:bf6e900df2749ef46238c8122fd74ed7b619f9726a98de0c4df1a9719ca0f99f
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:370cee31176b8bff781da8f054b9870dc93c63a8623674218a84718aa7abd3af
+ oid sha256:051dee7dfbeecb34b46e8409ffafec324501f465585234624669bc8c9e863ae4
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 2.0358448028564453,
- "best_model_checkpoint": "miner_id_24/checkpoint-75",
- "epoch": 0.035737685389242956,
+ "best_metric": 2.0155577659606934,
+ "best_model_checkpoint": "miner_id_24/checkpoint-100",
+ "epoch": 0.047650247185657274,
  "eval_steps": 25,
- "global_step": 75,
+ "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -564,6 +564,189 @@
  "eval_samples_per_second": 24.129,
  "eval_steps_per_second": 6.273,
  "step": 75
+ },
+ {
+ "epoch": 0.03621418786109953,
+ "grad_norm": 1.061888575553894,
+ "learning_rate": 2.3180194846605367e-05,
+ "loss": 2.0602,
+ "step": 76
+ },
+ {
+ "epoch": 0.036690690332956105,
+ "grad_norm": 1.073085904121399,
+ "learning_rate": 2.215611672859741e-05,
+ "loss": 2.1428,
+ "step": 77
+ },
+ {
+ "epoch": 0.037167192804812677,
+ "grad_norm": 1.0822275876998901,
+ "learning_rate": 2.1167208663446025e-05,
+ "loss": 2.1602,
+ "step": 78
+ },
+ {
+ "epoch": 0.03764369527666925,
+ "grad_norm": 1.0458821058273315,
+ "learning_rate": 2.0214529598676836e-05,
+ "loss": 2.055,
+ "step": 79
+ },
+ {
+ "epoch": 0.03812019774852582,
+ "grad_norm": 1.0914690494537354,
+ "learning_rate": 1.9299099686894423e-05,
+ "loss": 2.0903,
+ "step": 80
+ },
+ {
+ "epoch": 0.03859670022038239,
+ "grad_norm": 1.0751844644546509,
+ "learning_rate": 1.842189919337732e-05,
+ "loss": 2.0763,
+ "step": 81
+ },
+ {
+ "epoch": 0.03907320269223897,
+ "grad_norm": 1.065314531326294,
+ "learning_rate": 1.758386744638546e-05,
+ "loss": 2.0768,
+ "step": 82
+ },
+ {
+ "epoch": 0.03954970516409554,
+ "grad_norm": 0.9963571429252625,
+ "learning_rate": 1.6785901831303956e-05,
+ "loss": 2.1178,
+ "step": 83
+ },
+ {
+ "epoch": 0.04002620763595211,
+ "grad_norm": 1.0645560026168823,
+ "learning_rate": 1.602885682970026e-05,
+ "loss": 2.0319,
+ "step": 84
+ },
+ {
+ "epoch": 0.04050271010780868,
+ "grad_norm": 1.1069632768630981,
+ "learning_rate": 1.531354310432403e-05,
+ "loss": 2.1816,
+ "step": 85
+ },
+ {
+ "epoch": 0.040979212579665254,
+ "grad_norm": 1.0914970636367798,
+ "learning_rate": 1.464072663102903e-05,
+ "loss": 2.0122,
+ "step": 86
+ },
+ {
+ "epoch": 0.04145571505152183,
+ "grad_norm": 1.220619797706604,
+ "learning_rate": 1.4011127878547087e-05,
+ "loss": 2.26,
+ "step": 87
+ },
+ {
+ "epoch": 0.041932217523378404,
+ "grad_norm": 1.2335286140441895,
+ "learning_rate": 1.3425421036992098e-05,
+ "loss": 2.2017,
+ "step": 88
+ },
+ {
+ "epoch": 0.042408719995234975,
+ "grad_norm": 1.0788054466247559,
+ "learning_rate": 1.2884233295920353e-05,
+ "loss": 2.1318,
+ "step": 89
+ },
+ {
+ "epoch": 0.04288522246709155,
+ "grad_norm": 1.0492669343948364,
+ "learning_rate": 1.2388144172720251e-05,
+ "loss": 2.0355,
+ "step": 90
+ },
+ {
+ "epoch": 0.04336172493894812,
+ "grad_norm": 1.109262466430664,
+ "learning_rate": 1.1937684892050604e-05,
+ "loss": 2.0891,
+ "step": 91
+ },
+ {
+ "epoch": 0.043838227410804696,
+ "grad_norm": 1.066548228263855,
+ "learning_rate": 1.1533337816991932e-05,
+ "loss": 1.9607,
+ "step": 92
+ },
+ {
+ "epoch": 0.04431472988266127,
+ "grad_norm": 1.1744662523269653,
+ "learning_rate": 1.1175535932519987e-05,
+ "loss": 2.1241,
+ "step": 93
+ },
+ {
+ "epoch": 0.04479123235451784,
+ "grad_norm": 1.099749207496643,
+ "learning_rate": 1.0864662381854632e-05,
+ "loss": 2.1312,
+ "step": 94
+ },
+ {
+ "epoch": 0.04526773482637441,
+ "grad_norm": 1.049397349357605,
+ "learning_rate": 1.0601050056180447e-05,
+ "loss": 2.0349,
+ "step": 95
+ },
+ {
+ "epoch": 0.04574423729823098,
+ "grad_norm": 1.1223000288009644,
+ "learning_rate": 1.0384981238178534e-05,
+ "loss": 2.1497,
+ "step": 96
+ },
+ {
+ "epoch": 0.04622073977008756,
+ "grad_norm": 1.1168677806854248,
+ "learning_rate": 1.0216687299751144e-05,
+ "loss": 2.0932,
+ "step": 97
+ },
+ {
+ "epoch": 0.04669724224194413,
+ "grad_norm": 1.1823430061340332,
+ "learning_rate": 1.0096348454262845e-05,
+ "loss": 2.1753,
+ "step": 98
+ },
+ {
+ "epoch": 0.0471737447138007,
+ "grad_norm": 1.2020727396011353,
+ "learning_rate": 1.0024093563563546e-05,
+ "loss": 2.1313,
+ "step": 99
+ },
+ {
+ "epoch": 0.047650247185657274,
+ "grad_norm": 1.3995245695114136,
+ "learning_rate": 1e-05,
+ "loss": 2.3944,
+ "step": 100
+ },
+ {
+ "epoch": 0.047650247185657274,
+ "eval_loss": 2.0155577659606934,
+ "eval_runtime": 2.0739,
+ "eval_samples_per_second": 24.109,
+ "eval_steps_per_second": 6.268,
+ "step": 100
  }
  ],
  "logging_steps": 1,
@@ -587,12 +770,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 2.43721341763584e+17,
+ "total_flos": 3.24961789018112e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null