{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9986648865153538,
"eval_steps": 500,
"global_step": 187,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 1.6666666666666667e-06,
"loss": 2.7864,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 3.3333333333333333e-06,
"loss": 3.1764,
"step": 2
},
{
"epoch": 0.02,
"learning_rate": 5e-06,
"loss": 2.5692,
"step": 3
},
{
"epoch": 0.02,
"learning_rate": 6.666666666666667e-06,
"loss": 1.0477,
"step": 4
},
{
"epoch": 0.03,
"learning_rate": 8.333333333333334e-06,
"loss": 0.9304,
"step": 5
},
{
"epoch": 0.03,
"learning_rate": 1e-05,
"loss": 0.8711,
"step": 6
},
{
"epoch": 0.04,
"learning_rate": 9.999246866958693e-06,
"loss": 0.6422,
"step": 7
},
{
"epoch": 0.04,
"learning_rate": 9.99698769471852e-06,
"loss": 0.7042,
"step": 8
},
{
"epoch": 0.05,
"learning_rate": 9.993223163862385e-06,
"loss": 0.7917,
"step": 9
},
{
"epoch": 0.05,
"learning_rate": 9.98795440846732e-06,
"loss": 0.6634,
"step": 10
},
{
"epoch": 0.06,
"learning_rate": 9.981183015762831e-06,
"loss": 0.7043,
"step": 11
},
{
"epoch": 0.06,
"learning_rate": 9.972911025652754e-06,
"loss": 0.6872,
"step": 12
},
{
"epoch": 0.07,
"learning_rate": 9.963140930100713e-06,
"loss": 0.7418,
"step": 13
},
{
"epoch": 0.07,
"learning_rate": 9.951875672379424e-06,
"loss": 0.7245,
"step": 14
},
{
"epoch": 0.08,
"learning_rate": 9.939118646184007e-06,
"loss": 0.7227,
"step": 15
},
{
"epoch": 0.09,
"learning_rate": 9.924873694609636e-06,
"loss": 0.6843,
"step": 16
},
{
"epoch": 0.09,
"learning_rate": 9.909145108993794e-06,
"loss": 0.65,
"step": 17
},
{
"epoch": 0.1,
"learning_rate": 9.891937627623486e-06,
"loss": 0.7358,
"step": 18
},
{
"epoch": 0.1,
"learning_rate": 9.873256434307828e-06,
"loss": 0.763,
"step": 19
},
{
"epoch": 0.11,
"learning_rate": 9.853107156816393e-06,
"loss": 0.7882,
"step": 20
},
{
"epoch": 0.11,
"learning_rate": 9.831495865183832e-06,
"loss": 0.6681,
"step": 21
},
{
"epoch": 0.12,
"learning_rate": 9.808429069881267e-06,
"loss": 0.6109,
"step": 22
},
{
"epoch": 0.12,
"learning_rate": 9.783913719854977e-06,
"loss": 0.7464,
"step": 23
},
{
"epoch": 0.13,
"learning_rate": 9.757957200433011e-06,
"loss": 0.7748,
"step": 24
},
{
"epoch": 0.13,
"learning_rate": 9.730567331100333e-06,
"loss": 0.6528,
"step": 25
},
{
"epoch": 0.14,
"learning_rate": 9.701752363143183e-06,
"loss": 0.7256,
"step": 26
},
{
"epoch": 0.14,
"learning_rate": 9.67152097716334e-06,
"loss": 0.6889,
"step": 27
},
{
"epoch": 0.15,
"learning_rate": 9.639882280463071e-06,
"loss": 0.728,
"step": 28
},
{
"epoch": 0.15,
"learning_rate": 9.606845804301523e-06,
"loss": 0.7518,
"step": 29
},
{
"epoch": 0.16,
"learning_rate": 9.572421501023403e-06,
"loss": 0.7482,
"step": 30
},
{
"epoch": 0.17,
"learning_rate": 9.536619741060799e-06,
"loss": 0.6835,
"step": 31
},
{
"epoch": 0.17,
"learning_rate": 9.499451309809058e-06,
"loss": 0.6588,
"step": 32
},
{
"epoch": 0.18,
"learning_rate": 9.460927404377647e-06,
"loss": 0.8194,
"step": 33
},
{
"epoch": 0.18,
"learning_rate": 9.421059630216992e-06,
"loss": 0.6716,
"step": 34
},
{
"epoch": 0.19,
"learning_rate": 9.37985999762229e-06,
"loss": 0.7035,
"step": 35
},
{
"epoch": 0.19,
"learning_rate": 9.337340918115385e-06,
"loss": 0.7737,
"step": 36
},
{
"epoch": 0.2,
"learning_rate": 9.29351520070574e-06,
"loss": 0.7908,
"step": 37
},
{
"epoch": 0.2,
"learning_rate": 9.24839604803169e-06,
"loss": 0.7395,
"step": 38
},
{
"epoch": 0.21,
"learning_rate": 9.201997052383107e-06,
"loss": 0.6748,
"step": 39
},
{
"epoch": 0.21,
"learning_rate": 9.154332191606671e-06,
"loss": 0.6289,
"step": 40
},
{
"epoch": 0.22,
"learning_rate": 9.105415824895008e-06,
"loss": 0.7595,
"step": 41
},
{
"epoch": 0.22,
"learning_rate": 9.055262688460931e-06,
"loss": 0.7401,
"step": 42
},
{
"epoch": 0.23,
"learning_rate": 9.003887891098108e-06,
"loss": 0.6546,
"step": 43
},
{
"epoch": 0.23,
"learning_rate": 8.951306909629492e-06,
"loss": 0.6714,
"step": 44
},
{
"epoch": 0.24,
"learning_rate": 8.89753558424488e-06,
"loss": 0.666,
"step": 45
},
{
"epoch": 0.25,
"learning_rate": 8.842590113729001e-06,
"loss": 0.6523,
"step": 46
},
{
"epoch": 0.25,
"learning_rate": 8.786487050581583e-06,
"loss": 0.6988,
"step": 47
},
{
"epoch": 0.26,
"learning_rate": 8.729243296030851e-06,
"loss": 0.7612,
"step": 48
},
{
"epoch": 0.26,
"learning_rate": 8.670876094941991e-06,
"loss": 0.7252,
"step": 49
},
{
"epoch": 0.27,
"learning_rate": 8.611403030622074e-06,
"loss": 0.6683,
"step": 50
},
{
"epoch": 0.27,
"learning_rate": 8.55084201952302e-06,
"loss": 0.6546,
"step": 51
},
{
"epoch": 0.28,
"learning_rate": 8.489211305844216e-06,
"loss": 0.6822,
"step": 52
},
{
"epoch": 0.28,
"learning_rate": 8.4265294560364e-06,
"loss": 0.7031,
"step": 53
},
{
"epoch": 0.29,
"learning_rate": 8.362815353208441e-06,
"loss": 0.706,
"step": 54
},
{
"epoch": 0.29,
"learning_rate": 8.298088191438753e-06,
"loss": 0.7278,
"step": 55
},
{
"epoch": 0.3,
"learning_rate": 8.23236746999302e-06,
"loss": 0.6482,
"step": 56
},
{
"epoch": 0.3,
"learning_rate": 8.165672987449962e-06,
"loss": 0.6553,
"step": 57
},
{
"epoch": 0.31,
"learning_rate": 8.098024835736977e-06,
"loss": 0.7261,
"step": 58
},
{
"epoch": 0.32,
"learning_rate": 8.029443394077356e-06,
"loss": 0.5941,
"step": 59
},
{
"epoch": 0.32,
"learning_rate": 7.959949322850994e-06,
"loss": 0.7581,
"step": 60
},
{
"epoch": 0.33,
"learning_rate": 7.889563557370378e-06,
"loss": 0.6762,
"step": 61
},
{
"epoch": 0.33,
"learning_rate": 7.818307301573757e-06,
"loss": 0.718,
"step": 62
},
{
"epoch": 0.34,
"learning_rate": 7.746202021637385e-06,
"loss": 0.6745,
"step": 63
},
{
"epoch": 0.34,
"learning_rate": 7.67326943950877e-06,
"loss": 0.6839,
"step": 64
},
{
"epoch": 0.35,
"learning_rate": 7.599531526362873e-06,
"loss": 0.7154,
"step": 65
},
{
"epoch": 0.35,
"learning_rate": 7.525010495983202e-06,
"loss": 0.7622,
"step": 66
},
{
"epoch": 0.36,
"learning_rate": 7.449728798069864e-06,
"loss": 0.6516,
"step": 67
},
{
"epoch": 0.36,
"learning_rate": 7.373709111476498e-06,
"loss": 0.635,
"step": 68
},
{
"epoch": 0.37,
"learning_rate": 7.296974337378209e-06,
"loss": 0.6369,
"step": 69
},
{
"epoch": 0.37,
"learning_rate": 7.219547592372512e-06,
"loss": 0.6783,
"step": 70
},
{
"epoch": 0.38,
"learning_rate": 7.141452201515386e-06,
"loss": 0.7469,
"step": 71
},
{
"epoch": 0.38,
"learning_rate": 7.062711691294525e-06,
"loss": 0.6119,
"step": 72
},
{
"epoch": 0.39,
"learning_rate": 6.983349782541901e-06,
"loss": 0.6803,
"step": 73
},
{
"epoch": 0.4,
"learning_rate": 6.903390383287795e-06,
"loss": 0.6944,
"step": 74
},
{
"epoch": 0.4,
"learning_rate": 6.822857581558423e-06,
"loss": 0.662,
"step": 75
},
{
"epoch": 0.41,
"learning_rate": 6.741775638119345e-06,
"loss": 0.7491,
"step": 76
},
{
"epoch": 0.41,
"learning_rate": 6.66016897916682e-06,
"loss": 0.7162,
"step": 77
},
{
"epoch": 0.42,
"learning_rate": 6.57806218896935e-06,
"loss": 0.7451,
"step": 78
},
{
"epoch": 0.42,
"learning_rate": 6.495480002461577e-06,
"loss": 0.5831,
"step": 79
},
{
"epoch": 0.43,
"learning_rate": 6.412447297792818e-06,
"loss": 0.7656,
"step": 80
},
{
"epoch": 0.43,
"learning_rate": 6.328989088832431e-06,
"loss": 0.6309,
"step": 81
},
{
"epoch": 0.44,
"learning_rate": 6.245130517634307e-06,
"loss": 0.719,
"step": 82
},
{
"epoch": 0.44,
"learning_rate": 6.160896846862754e-06,
"loss": 0.6109,
"step": 83
},
{
"epoch": 0.45,
"learning_rate": 6.076313452182033e-06,
"loss": 0.6539,
"step": 84
},
{
"epoch": 0.45,
"learning_rate": 5.991405814611855e-06,
"loss": 0.6642,
"step": 85
},
{
"epoch": 0.46,
"learning_rate": 5.9061995128511455e-06,
"loss": 0.6832,
"step": 86
},
{
"epoch": 0.46,
"learning_rate": 5.820720215572375e-06,
"loss": 0.7376,
"step": 87
},
{
"epoch": 0.47,
"learning_rate": 5.734993673688801e-06,
"loss": 0.6968,
"step": 88
},
{
"epoch": 0.48,
"learning_rate": 5.6490457125969035e-06,
"loss": 0.743,
"step": 89
},
{
"epoch": 0.48,
"learning_rate": 5.562902224396416e-06,
"loss": 0.6412,
"step": 90
},
{
"epoch": 0.49,
"learning_rate": 5.476589160090238e-06,
"loss": 0.6313,
"step": 91
},
{
"epoch": 0.49,
"learning_rate": 5.390132521766626e-06,
"loss": 0.7327,
"step": 92
},
{
"epoch": 0.5,
"learning_rate": 5.30355835476596e-06,
"loss": 0.5725,
"step": 93
},
{
"epoch": 0.5,
"learning_rate": 5.216892739834519e-06,
"loss": 0.6587,
"step": 94
},
{
"epoch": 0.51,
"learning_rate": 5.13016178526756e-06,
"loss": 0.5893,
"step": 95
},
{
"epoch": 0.51,
"learning_rate": 5.043391619044122e-06,
"loss": 0.6343,
"step": 96
},
{
"epoch": 0.52,
"learning_rate": 4.956608380955877e-06,
"loss": 0.6764,
"step": 97
},
{
"epoch": 0.52,
"learning_rate": 4.869838214732441e-06,
"loss": 0.6486,
"step": 98
},
{
"epoch": 0.53,
"learning_rate": 4.783107260165483e-06,
"loss": 0.5761,
"step": 99
},
{
"epoch": 0.53,
"learning_rate": 4.696441645234042e-06,
"loss": 0.6928,
"step": 100
},
{
"epoch": 0.54,
"learning_rate": 4.609867478233377e-06,
"loss": 0.635,
"step": 101
},
{
"epoch": 0.54,
"learning_rate": 4.523410839909764e-06,
"loss": 0.6497,
"step": 102
},
{
"epoch": 0.55,
"learning_rate": 4.437097775603587e-06,
"loss": 0.6256,
"step": 103
},
{
"epoch": 0.56,
"learning_rate": 4.350954287403099e-06,
"loss": 0.6605,
"step": 104
},
{
"epoch": 0.56,
"learning_rate": 4.265006326311199e-06,
"loss": 0.6458,
"step": 105
},
{
"epoch": 0.57,
"learning_rate": 4.179279784427625e-06,
"loss": 0.6496,
"step": 106
},
{
"epoch": 0.57,
"learning_rate": 4.093800487148857e-06,
"loss": 0.6505,
"step": 107
},
{
"epoch": 0.58,
"learning_rate": 4.008594185388146e-06,
"loss": 0.6259,
"step": 108
},
{
"epoch": 0.58,
"learning_rate": 3.9236865478179685e-06,
"loss": 0.6401,
"step": 109
},
{
"epoch": 0.59,
"learning_rate": 3.839103153137247e-06,
"loss": 0.5964,
"step": 110
},
{
"epoch": 0.59,
"learning_rate": 3.7548694823656945e-06,
"loss": 0.6578,
"step": 111
},
{
"epoch": 0.6,
"learning_rate": 3.671010911167572e-06,
"loss": 0.6361,
"step": 112
},
{
"epoch": 0.6,
"learning_rate": 3.5875527022071808e-06,
"loss": 0.6685,
"step": 113
},
{
"epoch": 0.61,
"learning_rate": 3.5045199975384225e-06,
"loss": 0.5762,
"step": 114
},
{
"epoch": 0.61,
"learning_rate": 3.4219378110306523e-06,
"loss": 0.5239,
"step": 115
},
{
"epoch": 0.62,
"learning_rate": 3.3398310208331806e-06,
"loss": 0.6064,
"step": 116
},
{
"epoch": 0.62,
"learning_rate": 3.2582243618806574e-06,
"loss": 0.6162,
"step": 117
},
{
"epoch": 0.63,
"learning_rate": 3.177142418441578e-06,
"loss": 0.5415,
"step": 118
},
{
"epoch": 0.64,
"learning_rate": 3.096609616712207e-06,
"loss": 0.5404,
"step": 119
},
{
"epoch": 0.64,
"learning_rate": 3.0166502174581012e-06,
"loss": 0.6535,
"step": 120
},
{
"epoch": 0.65,
"learning_rate": 2.937288308705475e-06,
"loss": 0.6472,
"step": 121
},
{
"epoch": 0.65,
"learning_rate": 2.858547798484613e-06,
"loss": 0.5469,
"step": 122
},
{
"epoch": 0.66,
"learning_rate": 2.7804524076274898e-06,
"loss": 0.699,
"step": 123
},
{
"epoch": 0.66,
"learning_rate": 2.7030256626217932e-06,
"loss": 0.6188,
"step": 124
},
{
"epoch": 0.67,
"learning_rate": 2.6262908885235046e-06,
"loss": 0.6024,
"step": 125
},
{
"epoch": 0.67,
"learning_rate": 2.550271201930136e-06,
"loss": 0.5934,
"step": 126
},
{
"epoch": 0.68,
"learning_rate": 2.474989504016798e-06,
"loss": 0.6309,
"step": 127
},
{
"epoch": 0.68,
"learning_rate": 2.4004684736371276e-06,
"loss": 0.6157,
"step": 128
},
{
"epoch": 0.69,
"learning_rate": 2.32673056049123e-06,
"loss": 0.6419,
"step": 129
},
{
"epoch": 0.69,
"learning_rate": 2.253797978362617e-06,
"loss": 0.5713,
"step": 130
},
{
"epoch": 0.7,
"learning_rate": 2.1816926984262454e-06,
"loss": 0.6575,
"step": 131
},
{
"epoch": 0.7,
"learning_rate": 2.1104364426296237e-06,
"loss": 0.6824,
"step": 132
},
{
"epoch": 0.71,
"learning_rate": 2.040050677149008e-06,
"loss": 0.6374,
"step": 133
},
{
"epoch": 0.72,
"learning_rate": 1.970556605922645e-06,
"loss": 0.5793,
"step": 134
},
{
"epoch": 0.72,
"learning_rate": 1.9019751642630252e-06,
"loss": 0.5671,
"step": 135
},
{
"epoch": 0.73,
"learning_rate": 1.8343270125500379e-06,
"loss": 0.6081,
"step": 136
},
{
"epoch": 0.73,
"learning_rate": 1.7676325300069824e-06,
"loss": 0.5478,
"step": 137
},
{
"epoch": 0.74,
"learning_rate": 1.7019118085612474e-06,
"loss": 0.6179,
"step": 138
},
{
"epoch": 0.74,
"learning_rate": 1.6371846467915603e-06,
"loss": 0.6195,
"step": 139
},
{
"epoch": 0.75,
"learning_rate": 1.5734705439636017e-06,
"loss": 0.6112,
"step": 140
},
{
"epoch": 0.75,
"learning_rate": 1.5107886941557853e-06,
"loss": 0.5949,
"step": 141
},
{
"epoch": 0.76,
"learning_rate": 1.4491579804769817e-06,
"loss": 0.6089,
"step": 142
},
{
"epoch": 0.76,
"learning_rate": 1.3885969693779277e-06,
"loss": 0.6091,
"step": 143
},
{
"epoch": 0.77,
"learning_rate": 1.3291239050580085e-06,
"loss": 0.6016,
"step": 144
},
{
"epoch": 0.77,
"learning_rate": 1.2707567039691505e-06,
"loss": 0.6553,
"step": 145
},
{
"epoch": 0.78,
"learning_rate": 1.213512949418419e-06,
"loss": 0.633,
"step": 146
},
{
"epoch": 0.79,
"learning_rate": 1.1574098862709993e-06,
"loss": 0.6151,
"step": 147
},
{
"epoch": 0.79,
"learning_rate": 1.1024644157551206e-06,
"loss": 0.6017,
"step": 148
},
{
"epoch": 0.8,
"learning_rate": 1.0486930903705095e-06,
"loss": 0.5714,
"step": 149
},
{
"epoch": 0.8,
"learning_rate": 9.961121089018933e-07,
"loss": 0.6089,
"step": 150
},
{
"epoch": 0.81,
"learning_rate": 9.447373115390702e-07,
"loss": 0.5789,
"step": 151
},
{
"epoch": 0.81,
"learning_rate": 8.945841751049916e-07,
"loss": 0.6077,
"step": 152
},
{
"epoch": 0.82,
"learning_rate": 8.45667808393329e-07,
"loss": 0.5956,
"step": 153
},
{
"epoch": 0.82,
"learning_rate": 7.980029476168943e-07,
"loss": 0.4987,
"step": 154
},
{
"epoch": 0.83,
"learning_rate": 7.516039519683105e-07,
"loss": 0.5695,
"step": 155
},
{
"epoch": 0.83,
"learning_rate": 7.064847992942614e-07,
"loss": 0.6005,
"step": 156
},
{
"epoch": 0.84,
"learning_rate": 6.626590818846163e-07,
"loss": 0.569,
"step": 157
},
{
"epoch": 0.84,
"learning_rate": 6.201400023777105e-07,
"loss": 0.5699,
"step": 158
},
{
"epoch": 0.85,
"learning_rate": 5.789403697830104e-07,
"loss": 0.7248,
"step": 159
},
{
"epoch": 0.85,
"learning_rate": 5.390725956223531e-07,
"loss": 0.7469,
"step": 160
},
{
"epoch": 0.86,
"learning_rate": 5.005486901909429e-07,
"loss": 0.7068,
"step": 161
},
{
"epoch": 0.87,
"learning_rate": 4.6338025893920167e-07,
"loss": 0.5459,
"step": 162
},
{
"epoch": 0.87,
"learning_rate": 4.275784989765985e-07,
"loss": 0.6079,
"step": 163
},
{
"epoch": 0.88,
"learning_rate": 3.93154195698478e-07,
"loss": 0.5906,
"step": 164
},
{
"epoch": 0.88,
"learning_rate": 3.6011771953693044e-07,
"loss": 0.6135,
"step": 165
},
{
"epoch": 0.89,
"learning_rate": 3.284790228366602e-07,
"loss": 0.4893,
"step": 166
},
{
"epoch": 0.89,
"learning_rate": 2.982476368568177e-07,
"loss": 0.5953,
"step": 167
},
{
"epoch": 0.9,
"learning_rate": 2.6943266889966624e-07,
"loss": 0.6246,
"step": 168
},
{
"epoch": 0.9,
"learning_rate": 2.4204279956698994e-07,
"loss": 0.5807,
"step": 169
},
{
"epoch": 0.91,
"learning_rate": 2.1608628014502364e-07,
"loss": 0.6546,
"step": 170
},
{
"epoch": 0.91,
"learning_rate": 1.915709301187335e-07,
"loss": 0.5779,
"step": 171
},
{
"epoch": 0.92,
"learning_rate": 1.6850413481616868e-07,
"loss": 0.6579,
"step": 172
},
{
"epoch": 0.92,
"learning_rate": 1.468928431836092e-07,
"loss": 0.6288,
"step": 173
},
{
"epoch": 0.93,
"learning_rate": 1.2674356569217282e-07,
"loss": 0.6266,
"step": 174
},
{
"epoch": 0.93,
"learning_rate": 1.080623723765134e-07,
"loss": 0.6483,
"step": 175
},
{
"epoch": 0.94,
"learning_rate": 9.085489100620737e-08,
"loss": 0.5573,
"step": 176
},
{
"epoch": 0.95,
"learning_rate": 7.512630539036502e-08,
"loss": 0.5503,
"step": 177
},
{
"epoch": 0.95,
"learning_rate": 6.088135381599414e-08,
"loss": 0.6147,
"step": 178
},
{
"epoch": 0.96,
"learning_rate": 4.8124327620576726e-08,
"loss": 0.6135,
"step": 179
},
{
"epoch": 0.96,
"learning_rate": 3.685906989928656e-08,
"loss": 0.6094,
"step": 180
},
{
"epoch": 0.97,
"learning_rate": 2.7088974347246888e-08,
"loss": 0.6843,
"step": 181
},
{
"epoch": 0.97,
"learning_rate": 1.8816984237169378e-08,
"loss": 0.5167,
"step": 182
},
{
"epoch": 0.98,
"learning_rate": 1.2045591532681145e-08,
"loss": 0.5398,
"step": 183
},
{
"epoch": 0.98,
"learning_rate": 6.7768361376152616e-09,
"loss": 0.5482,
"step": 184
},
{
"epoch": 0.99,
"learning_rate": 3.0123052814812203e-09,
"loss": 0.6061,
"step": 185
},
{
"epoch": 0.99,
"learning_rate": 7.53133041307974e-10,
"loss": 0.5735,
"step": 186
},
{
"epoch": 1.0,
"learning_rate": 0.0,
"loss": 0.5353,
"step": 187
}
],
"log_save_evaluate_time": 3923.0782437324524,
"logging_steps": 1.0,
"max_steps": 187,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 40,
"total_flos": 7.673772156280793e+19,
"total_tokens": 900759552.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}