{ "best_metric": null, "best_model_checkpoint": null, "epoch": 9.803921568627452, "eval_steps": 500, "global_step": 250, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0392156862745098, "grad_norm": 536.0, "learning_rate": 8.000000000000001e-06, "loss": 51.7984, "step": 1 }, { "epoch": 0.19607843137254902, "grad_norm": 314.0, "learning_rate": 4e-05, "loss": 43.035, "step": 5 }, { "epoch": 0.39215686274509803, "grad_norm": 45.25, "learning_rate": 8e-05, "loss": 28.3835, "step": 10 }, { "epoch": 0.5882352941176471, "grad_norm": 11.5, "learning_rate": 0.00012, "loss": 21.6194, "step": 15 }, { "epoch": 0.7843137254901961, "grad_norm": 10.0625, "learning_rate": 0.00016, "loss": 19.3838, "step": 20 }, { "epoch": 0.9803921568627451, "grad_norm": 37.75, "learning_rate": 0.0002, "loss": 16.725, "step": 25 }, { "epoch": 0.9803921568627451, "eval_loss": 6.552042484283447, "eval_runtime": 2.0523, "eval_samples_per_second": 4.873, "eval_steps_per_second": 0.975, "step": 25 }, { "epoch": 1.1764705882352942, "grad_norm": 10.875, "learning_rate": 0.00019975640502598244, "loss": 7.143, "step": 30 }, { "epoch": 1.3725490196078431, "grad_norm": 7.75, "learning_rate": 0.00019902680687415705, "loss": 2.2222, "step": 35 }, { "epoch": 1.5686274509803921, "grad_norm": 2.453125, "learning_rate": 0.00019781476007338058, "loss": 1.7799, "step": 40 }, { "epoch": 1.7647058823529411, "grad_norm": 4.34375, "learning_rate": 0.0001961261695938319, "loss": 1.5965, "step": 45 }, { "epoch": 1.9607843137254903, "grad_norm": 2.4375, "learning_rate": 0.00019396926207859084, "loss": 1.5122, "step": 50 }, { "epoch": 2.0, "eval_loss": 2.6625783443450928, "eval_runtime": 2.0517, "eval_samples_per_second": 4.874, "eval_steps_per_second": 0.975, "step": 51 }, { "epoch": 2.156862745098039, "grad_norm": 1.7421875, "learning_rate": 0.0001913545457642601, "loss": 1.3478, "step": 55 }, { "epoch": 2.3529411764705883, "grad_norm": 2.046875, "learning_rate": 0.00018829475928589271, "loss": 1.26, "step": 60 }, { "epoch": 2.549019607843137, "grad_norm": 3.8125, "learning_rate": 0.0001848048096156426, "loss": 1.2315, "step": 65 }, { "epoch": 2.7450980392156863, "grad_norm": 4.46875, "learning_rate": 0.00018090169943749476, "loss": 1.1548, "step": 70 }, { "epoch": 2.9411764705882355, "grad_norm": 1.5, "learning_rate": 0.0001766044443118978, "loss": 1.1154, "step": 75 }, { "epoch": 2.980392156862745, "eval_loss": 2.591742753982544, "eval_runtime": 2.0491, "eval_samples_per_second": 4.88, "eval_steps_per_second": 0.976, "step": 76 }, { "epoch": 3.1372549019607843, "grad_norm": 1.9375, "learning_rate": 0.0001719339800338651, "loss": 1.0096, "step": 80 }, { "epoch": 3.3333333333333335, "grad_norm": 3.40625, "learning_rate": 0.00016691306063588583, "loss": 0.9379, "step": 85 }, { "epoch": 3.5294117647058822, "grad_norm": 1.65625, "learning_rate": 0.0001615661475325658, "loss": 0.9333, "step": 90 }, { "epoch": 3.7254901960784315, "grad_norm": 3.109375, "learning_rate": 0.0001559192903470747, "loss": 0.9114, "step": 95 }, { "epoch": 3.9215686274509802, "grad_norm": 11.0, "learning_rate": 0.00015000000000000001, "loss": 0.9204, "step": 100 }, { "epoch": 4.0, "eval_loss": 2.6570026874542236, "eval_runtime": 2.0401, "eval_samples_per_second": 4.902, "eval_steps_per_second": 0.98, "step": 102 }, { "epoch": 4.117647058823529, "grad_norm": 1.4296875, "learning_rate": 0.00014383711467890774, "loss": 0.8556, "step": 105 }, { "epoch": 4.313725490196078, "grad_norm": 1.3984375, 
"learning_rate": 0.00013746065934159123, "loss": 0.786, "step": 110 }, { "epoch": 4.509803921568627, "grad_norm": 1.0703125, "learning_rate": 0.00013090169943749476, "loss": 0.797, "step": 115 }, { "epoch": 4.705882352941177, "grad_norm": 1.9609375, "learning_rate": 0.00012419218955996676, "loss": 0.7689, "step": 120 }, { "epoch": 4.901960784313726, "grad_norm": 1.4296875, "learning_rate": 0.00011736481776669306, "loss": 0.779, "step": 125 }, { "epoch": 4.980392156862745, "eval_loss": 2.749800205230713, "eval_runtime": 2.0472, "eval_samples_per_second": 4.885, "eval_steps_per_second": 0.977, "step": 127 }, { "epoch": 5.098039215686274, "grad_norm": 15.25, "learning_rate": 0.00011045284632676536, "loss": 0.6889, "step": 130 }, { "epoch": 5.294117647058823, "grad_norm": 1.28125, "learning_rate": 0.00010348994967025012, "loss": 0.6232, "step": 135 }, { "epoch": 5.490196078431373, "grad_norm": 1.359375, "learning_rate": 9.651005032974994e-05, "loss": 0.6134, "step": 140 }, { "epoch": 5.686274509803922, "grad_norm": 1.25, "learning_rate": 8.954715367323468e-05, "loss": 0.612, "step": 145 }, { "epoch": 5.882352941176471, "grad_norm": 1.0390625, "learning_rate": 8.263518223330697e-05, "loss": 0.6207, "step": 150 }, { "epoch": 6.0, "eval_loss": 2.9976024627685547, "eval_runtime": 2.0445, "eval_samples_per_second": 4.891, "eval_steps_per_second": 0.978, "step": 153 }, { "epoch": 6.078431372549019, "grad_norm": 1.1328125, "learning_rate": 7.580781044003324e-05, "loss": 0.5836, "step": 155 }, { "epoch": 6.2745098039215685, "grad_norm": 1.203125, "learning_rate": 6.909830056250527e-05, "loss": 0.5047, "step": 160 }, { "epoch": 6.470588235294118, "grad_norm": 1.4765625, "learning_rate": 6.25393406584088e-05, "loss": 0.4862, "step": 165 }, { "epoch": 6.666666666666667, "grad_norm": 1.21875, "learning_rate": 5.616288532109225e-05, "loss": 0.4953, "step": 170 }, { "epoch": 6.862745098039216, "grad_norm": 1.1484375, "learning_rate": 5.000000000000002e-05, "loss": 0.4762, "step": 175 }, { "epoch": 6.980392156862745, "eval_loss": 3.466813325881958, "eval_runtime": 2.0565, "eval_samples_per_second": 4.863, "eval_steps_per_second": 0.973, "step": 178 }, { "epoch": 7.0588235294117645, "grad_norm": 1.203125, "learning_rate": 4.4080709652925336e-05, "loss": 0.4669, "step": 180 }, { "epoch": 7.254901960784314, "grad_norm": 1.4140625, "learning_rate": 3.843385246743417e-05, "loss": 0.3974, "step": 185 }, { "epoch": 7.450980392156863, "grad_norm": 1.0625, "learning_rate": 3.308693936411421e-05, "loss": 0.3956, "step": 190 }, { "epoch": 7.647058823529412, "grad_norm": 1.0703125, "learning_rate": 2.8066019966134904e-05, "loss": 0.386, "step": 195 }, { "epoch": 7.8431372549019605, "grad_norm": 1.1328125, "learning_rate": 2.339555568810221e-05, "loss": 0.3908, "step": 200 }, { "epoch": 8.0, "eval_loss": 3.824570417404175, "eval_runtime": 2.0426, "eval_samples_per_second": 4.896, "eval_steps_per_second": 0.979, "step": 204 }, { "epoch": 8.03921568627451, "grad_norm": 0.984375, "learning_rate": 1.9098300562505266e-05, "loss": 0.3721, "step": 205 }, { "epoch": 8.235294117647058, "grad_norm": 0.96875, "learning_rate": 1.5195190384357404e-05, "loss": 0.3342, "step": 210 }, { "epoch": 8.431372549019608, "grad_norm": 1.046875, "learning_rate": 1.1705240714107302e-05, "loss": 0.346, "step": 215 }, { "epoch": 8.627450980392156, "grad_norm": 1.046875, "learning_rate": 8.645454235739903e-06, "loss": 0.3381, "step": 220 }, { "epoch": 8.823529411764707, "grad_norm": 1.046875, "learning_rate": 6.030737921409169e-06, "loss": 0.3418, 
"step": 225 }, { "epoch": 8.980392156862745, "eval_loss": 4.056135177612305, "eval_runtime": 2.0561, "eval_samples_per_second": 4.864, "eval_steps_per_second": 0.973, "step": 229 }, { "epoch": 9.019607843137255, "grad_norm": 0.88671875, "learning_rate": 3.873830406168111e-06, "loss": 0.3294, "step": 230 }, { "epoch": 9.215686274509803, "grad_norm": 0.89453125, "learning_rate": 2.1852399266194314e-06, "loss": 0.3265, "step": 235 }, { "epoch": 9.411764705882353, "grad_norm": 0.9296875, "learning_rate": 9.731931258429638e-07, "loss": 0.3268, "step": 240 }, { "epoch": 9.607843137254902, "grad_norm": 0.91796875, "learning_rate": 2.4359497401758024e-07, "loss": 0.3257, "step": 245 }, { "epoch": 9.803921568627452, "grad_norm": 0.9140625, "learning_rate": 0.0, "loss": 0.3252, "step": 250 }, { "epoch": 9.803921568627452, "eval_loss": 4.065803050994873, "eval_runtime": 2.0429, "eval_samples_per_second": 4.895, "eval_steps_per_second": 0.979, "step": 250 }, { "epoch": 9.803921568627452, "step": 250, "total_flos": 1.9110914639160934e+17, "train_loss": 3.4213723726272582, "train_runtime": 1646.3335, "train_samples_per_second": 2.448, "train_steps_per_second": 0.152 } ], "logging_steps": 5, "max_steps": 250, "num_input_tokens_seen": 0, "num_train_epochs": 10, "save_steps": 100, "total_flos": 1.9110914639160934e+17, "train_batch_size": 4, "trial_name": null, "trial_params": null }