farmery committed on
Commit e440c85 · verified · 1 Parent(s): 5b9ca88

Training in progress, step 150, checkpoint

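For reference, a minimal sketch of fetching exactly this revision of the checkpoint with huggingface_hub; the repository id below is a placeholder, since this commit page only shows the hash e440c85.

# A minimal sketch, not part of this commit: pull only the checkpoint directory
# at revision e440c85. "user/repo" is a placeholder repository id.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="user/repo",                  # placeholder, the repo is not named on this page
    revision="e440c85",                   # the commit shown above
    allow_patterns=["last-checkpoint/*"], # adapter, optimizer, RNG, scheduler, trainer state
)
print(local_dir)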
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:784524aa3579aae3c3b253401b3dc775e297a056b39aada039f0311142f0470c
+ oid sha256:e177a55a4f8968030141b68018bb00b657e902abe021c6295443a76822d37319
  size 1521616
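Each of these files is stored through Git LFS, so the diff only touches the three-line pointer (spec version, sha256 oid, byte size) while the binary payload lives in LFS storage. A minimal sketch of checking a downloaded copy against the new pointer; the local path is an assumption about where the file ends up.

# A minimal sketch, assuming the file has been downloaded next to this script.
import hashlib

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file and return the hex digest Git LFS records as the oid."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

digest = lfs_sha256("last-checkpoint/adapter_model.safetensors")
print(digest == "e177a55a4f8968030141b68018bb00b657e902abe021c6295443a76822d37319")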
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5e7e4ae7431bf4f77aa7fa638df23b6e0c4657326b7b9b6d05010cb3c8f37b80
+ oid sha256:a9d55e8407570a07e057e59aa40756a92f640ad3c3b2cadc7b3ccb8aebdf144a
  size 3108666
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bac44978e852762998f2162e9ea4a9420db1707864946b44e6a7ab228de13111
+ oid sha256:1f122f27ff41d75d8242038bdb7dcc56b03a3ee0fef27e0813a0e5bd33facded
  size 14960
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cbc53838d71d7ffbc88192ec7303d7e6bbca69387969f4dd64884419c5fd8735
+ oid sha256:1a7ca8e55af6b4feda97946cf69d286f368ca3c1d50f3951c0dcbfa4faae95ae
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d38ca30957f1bae8c2f8be41072eb1e4564215c1b03164f3762535e35c2f63d5
+ oid sha256:8207164e65c023390018e471df1c8a2d6b63497b02538e7c9479a46941ee2ec7
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bfa6812679ba3104c6d0c851d5e9ffb6d9a1a191ceb230d4b225523e0da3d8aa
+ oid sha256:faf9d6db6d338497d7f96bb6781178aab234997fa413b22da388194e8a349725
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:438a31e36d5952d71d9fab09b7d68fe448c33272560b3c248a5c2466ee58e54e
+ oid sha256:028a9b4a00b26e663bf1479c01a19ca4776fdbabc2a9b3b6ed73cd614df36408
  size 1064
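optimizer.pt, scheduler.pt and the four per-rank rng_state_*.pth files (one per training process) are ordinary torch.save payloads written by the Trainer. A minimal sketch of peeking at two of them; the exact layout of these objects is a transformers implementation detail and may differ between versions.

# A minimal sketch: inspect the scheduler and one RNG snapshot without resuming training.
# weights_only=False is used because these payloads are plain pickled Python objects.
import torch

sched = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
print(type(sched), sched if isinstance(sched, dict) else "")

rng = torch.load("last-checkpoint/rng_state_0.pth", map_location="cpu", weights_only=False)
print(type(rng), sorted(rng.keys()) if isinstance(rng, dict) else "")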
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 1.987208366394043,
- "best_model_checkpoint": "miner_id_24/checkpoint-100",
- "epoch": 1.8465303771955082,
+ "best_metric": 1.972500205039978,
+ "best_model_checkpoint": "miner_id_24/checkpoint-150",
+ "epoch": 2.771667146559171,
  "eval_steps": 25,
- "global_step": 100,
+ "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -747,6 +747,372 @@
  "eval_samples_per_second": 258.808,
  "eval_steps_per_second": 67.29,
  "step": 100
+ },
+ {
+ "epoch": 1.8649582493521453,
+ "grad_norm": 0.6175363659858704,
+ "learning_rate": 6.377906449072578e-05,
+ "loss": 1.937,
+ "step": 101
+ },
+ {
+ "epoch": 1.883386121508782,
+ "grad_norm": 0.5189071297645569,
+ "learning_rate": 6.311147670162576e-05,
+ "loss": 2.0043,
+ "step": 102
+ },
+ {
+ "epoch": 1.901813993665419,
+ "grad_norm": 0.7783696055412292,
+ "learning_rate": 6.244203851625526e-05,
+ "loss": 1.9778,
+ "step": 103
+ },
+ {
+ "epoch": 1.9202418658220557,
+ "grad_norm": 1.224826693534851,
+ "learning_rate": 6.177090264736525e-05,
+ "loss": 2.0057,
+ "step": 104
+ },
+ {
+ "epoch": 1.9386697379786928,
+ "grad_norm": 2.2593610286712646,
+ "learning_rate": 6.109822219498354e-05,
+ "loss": 1.9667,
+ "step": 105
+ },
+ {
+ "epoch": 1.9570976101353297,
+ "grad_norm": 1.2801239490509033,
+ "learning_rate": 6.042415061148954e-05,
+ "loss": 1.9922,
+ "step": 106
+ },
+ {
+ "epoch": 1.9755254822919666,
+ "grad_norm": 0.8543593883514404,
+ "learning_rate": 5.9748841666608565e-05,
+ "loss": 1.9925,
+ "step": 107
+ },
+ {
+ "epoch": 1.9939533544486037,
+ "grad_norm": 0.5854929685592651,
+ "learning_rate": 5.907244941233371e-05,
+ "loss": 1.9671,
+ "step": 108
+ },
+ {
+ "epoch": 2.016124388137057,
+ "grad_norm": 2.585685968399048,
+ "learning_rate": 5.8395128147783474e-05,
+ "loss": 4.0407,
+ "step": 109
+ },
+ {
+ "epoch": 2.0345522602936943,
+ "grad_norm": 1.6653348207473755,
+ "learning_rate": 5.771703238400288e-05,
+ "loss": 2.0671,
+ "step": 110
+ },
+ {
+ "epoch": 2.052980132450331,
+ "grad_norm": 0.6874579191207886,
+ "learning_rate": 5.703831680871631e-05,
+ "loss": 1.9964,
+ "step": 111
+ },
+ {
+ "epoch": 2.071408004606968,
+ "grad_norm": 0.906933605670929,
+ "learning_rate": 5.635913625104e-05,
+ "loss": 1.9681,
+ "step": 112
+ },
+ {
+ "epoch": 2.089835876763605,
+ "grad_norm": 0.7688360214233398,
+ "learning_rate": 5.567964564616237e-05,
+ "loss": 2.0265,
+ "step": 113
+ },
+ {
+ "epoch": 2.108263748920242,
+ "grad_norm": 1.0667365789413452,
+ "learning_rate": 5.500000000000001e-05,
+ "loss": 1.9802,
+ "step": 114
+ },
+ {
+ "epoch": 2.126691621076879,
+ "grad_norm": 1.1549663543701172,
+ "learning_rate": 5.432035435383764e-05,
+ "loss": 2.0294,
+ "step": 115
+ },
+ {
+ "epoch": 2.1451194932335156,
+ "grad_norm": 0.9386123418807983,
+ "learning_rate": 5.364086374896001e-05,
+ "loss": 1.9267,
+ "step": 116
+ },
+ {
+ "epoch": 2.1635473653901527,
+ "grad_norm": 0.7642337679862976,
+ "learning_rate": 5.296168319128372e-05,
+ "loss": 1.9907,
+ "step": 117
+ },
+ {
+ "epoch": 2.1819752375467893,
+ "grad_norm": 0.5543763041496277,
+ "learning_rate": 5.2282967615997125e-05,
+ "loss": 1.9707,
+ "step": 118
+ },
+ {
+ "epoch": 2.2004031097034265,
+ "grad_norm": 0.7983617186546326,
+ "learning_rate": 5.160487185221653e-05,
+ "loss": 1.9696,
+ "step": 119
+ },
+ {
+ "epoch": 2.218830981860063,
+ "grad_norm": 0.3363061845302582,
+ "learning_rate": 5.092755058766631e-05,
+ "loss": 1.9536,
+ "step": 120
+ },
+ {
+ "epoch": 2.2372588540167,
+ "grad_norm": 0.9921276569366455,
+ "learning_rate": 5.025115833339146e-05,
+ "loss": 2.0054,
+ "step": 121
+ },
+ {
+ "epoch": 2.2556867261733373,
+ "grad_norm": 1.2545318603515625,
+ "learning_rate": 4.9575849388510473e-05,
+ "loss": 1.9755,
+ "step": 122
+ },
+ {
+ "epoch": 2.274114598329974,
+ "grad_norm": 1.0431549549102783,
+ "learning_rate": 4.890177780501648e-05,
+ "loss": 2.047,
+ "step": 123
+ },
+ {
+ "epoch": 2.292542470486611,
+ "grad_norm": 0.9374226331710815,
+ "learning_rate": 4.8229097352634765e-05,
+ "loss": 1.9859,
+ "step": 124
+ },
+ {
+ "epoch": 2.3109703426432477,
+ "grad_norm": 1.3169200420379639,
+ "learning_rate": 4.755796148374475e-05,
+ "loss": 1.957,
+ "step": 125
+ },
+ {
+ "epoch": 2.3109703426432477,
+ "eval_loss": 1.9774408340454102,
+ "eval_runtime": 0.1937,
+ "eval_samples_per_second": 258.081,
+ "eval_steps_per_second": 67.101,
+ "step": 125
+ },
+ {
+ "epoch": 2.329398214799885,
+ "grad_norm": 0.6126213669776917,
+ "learning_rate": 4.688852329837424e-05,
+ "loss": 1.9839,
+ "step": 126
+ },
+ {
+ "epoch": 2.3478260869565215,
+ "grad_norm": 1.152384638786316,
+ "learning_rate": 4.6220935509274235e-05,
+ "loss": 1.9543,
+ "step": 127
+ },
+ {
+ "epoch": 2.3662539591131586,
+ "grad_norm": 0.6893303394317627,
+ "learning_rate": 4.5555350407081863e-05,
+ "loss": 1.9423,
+ "step": 128
+ },
+ {
+ "epoch": 2.3846818312697957,
+ "grad_norm": 0.8432124853134155,
+ "learning_rate": 4.489191982557984e-05,
+ "loss": 2.0398,
+ "step": 129
+ },
+ {
+ "epoch": 2.4031097034264324,
+ "grad_norm": 0.5620063543319702,
+ "learning_rate": 4.423079510705992e-05,
+ "loss": 1.9957,
+ "step": 130
+ },
+ {
+ "epoch": 2.4215375755830695,
+ "grad_norm": 1.0304889678955078,
+ "learning_rate": 4.357212706779864e-05,
+ "loss": 2.0187,
+ "step": 131
+ },
+ {
+ "epoch": 2.439965447739706,
+ "grad_norm": 0.32691672444343567,
+ "learning_rate": 4.291606596365304e-05,
+ "loss": 1.952,
+ "step": 132
+ },
+ {
+ "epoch": 2.4583933198963432,
+ "grad_norm": 0.4635773301124573,
+ "learning_rate": 4.226276145578408e-05,
+ "loss": 1.9837,
+ "step": 133
+ },
+ {
+ "epoch": 2.47682119205298,
+ "grad_norm": 1.219428300857544,
+ "learning_rate": 4.161236257651587e-05,
+ "loss": 1.9887,
+ "step": 134
+ },
+ {
+ "epoch": 2.495249064209617,
+ "grad_norm": 0.8382700681686401,
+ "learning_rate": 4.09650176953383e-05,
+ "loss": 1.9683,
+ "step": 135
+ },
+ {
+ "epoch": 2.513676936366254,
+ "grad_norm": 0.7893387675285339,
+ "learning_rate": 4.032087448506089e-05,
+ "loss": 1.9834,
+ "step": 136
+ },
+ {
+ "epoch": 2.5321048085228908,
+ "grad_norm": 0.38780632615089417,
+ "learning_rate": 3.968007988812552e-05,
+ "loss": 1.9808,
+ "step": 137
+ },
+ {
+ "epoch": 2.550532680679528,
+ "grad_norm": 0.579411506652832,
+ "learning_rate": 3.904278008308589e-05,
+ "loss": 1.9948,
+ "step": 138
+ },
+ {
+ "epoch": 2.5689605528361645,
+ "grad_norm": 0.5201661586761475,
+ "learning_rate": 3.840912045126106e-05,
+ "loss": 2.0085,
+ "step": 139
+ },
+ {
+ "epoch": 2.5873884249928016,
+ "grad_norm": 0.2307479977607727,
+ "learning_rate": 3.777924554357096e-05,
+ "loss": 1.9852,
+ "step": 140
+ },
+ {
+ "epoch": 2.6058162971494383,
+ "grad_norm": 0.4744541645050049,
+ "learning_rate": 3.715329904756143e-05,
+ "loss": 1.9508,
+ "step": 141
+ },
+ {
+ "epoch": 2.6242441693060754,
+ "grad_norm": 0.33469077944755554,
+ "learning_rate": 3.653142375462596e-05,
+ "loss": 1.9608,
+ "step": 142
+ },
+ {
+ "epoch": 2.6426720414627125,
+ "grad_norm": 0.2714967429637909,
+ "learning_rate": 3.591376152743211e-05,
+ "loss": 1.9771,
+ "step": 143
+ },
+ {
+ "epoch": 2.661099913619349,
+ "grad_norm": 0.6618818044662476,
+ "learning_rate": 3.530045326755967e-05,
+ "loss": 1.9385,
+ "step": 144
+ },
+ {
+ "epoch": 2.6795277857759863,
+ "grad_norm": 0.7056910991668701,
+ "learning_rate": 3.46916388833581e-05,
+ "loss": 1.9505,
+ "step": 145
+ },
+ {
+ "epoch": 2.697955657932623,
+ "grad_norm": 0.5343828201293945,
+ "learning_rate": 3.408745725803042e-05,
+ "loss": 2.0025,
+ "step": 146
+ },
+ {
+ "epoch": 2.71638353008926,
+ "grad_norm": 1.5520598888397217,
+ "learning_rate": 3.348804621795122e-05,
+ "loss": 1.9265,
+ "step": 147
+ },
+ {
+ "epoch": 2.7348114022458967,
+ "grad_norm": 0.4644526243209839,
+ "learning_rate": 3.2893542501225534e-05,
+ "loss": 2.0005,
+ "step": 148
+ },
+ {
+ "epoch": 2.753239274402534,
+ "grad_norm": 0.3085757791996002,
+ "learning_rate": 3.2304081726495974e-05,
+ "loss": 1.9679,
+ "step": 149
+ },
+ {
+ "epoch": 2.771667146559171,
+ "grad_norm": 1.0300484895706177,
+ "learning_rate": 3.1719798362005444e-05,
+ "loss": 1.9888,
+ "step": 150
+ },
+ {
+ "epoch": 2.771667146559171,
+ "eval_loss": 1.972500205039978,
+ "eval_runtime": 0.1941,
+ "eval_samples_per_second": 257.57,
+ "eval_steps_per_second": 66.968,
+ "step": 150
  }
  ],
  "logging_steps": 1,
@@ -775,7 +1141,7 @@
  "attributes": {}
  }
  },
- "total_flos": 928417715322880.0,
+ "total_flos": 1392626572984320.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null