{ "best_metric": 0.43766666666666665, "best_model_checkpoint": "clip-vit-large-patch14-finetuned-clip-vit-large-patch14-mnist_linear_probe/checkpoint-315", "epoch": 2.985781990521327, "eval_steps": 500, "global_step": 315, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0947867298578199, "grad_norm": 2.288234233856201, "learning_rate": 1.5625e-05, "loss": 2.4094, "step": 10 }, { "epoch": 0.1895734597156398, "grad_norm": 2.1556286811828613, "learning_rate": 3.125e-05, "loss": 2.3858, "step": 20 }, { "epoch": 0.2843601895734597, "grad_norm": 2.1012516021728516, "learning_rate": 4.6875e-05, "loss": 2.3743, "step": 30 }, { "epoch": 0.3791469194312796, "grad_norm": 1.212685227394104, "learning_rate": 4.858657243816255e-05, "loss": 2.3353, "step": 40 }, { "epoch": 0.47393364928909953, "grad_norm": 1.0641510486602783, "learning_rate": 4.6819787985865726e-05, "loss": 2.3048, "step": 50 }, { "epoch": 0.5687203791469194, "grad_norm": 0.9478774070739746, "learning_rate": 4.50530035335689e-05, "loss": 2.2905, "step": 60 }, { "epoch": 0.6635071090047393, "grad_norm": 0.7033135294914246, "learning_rate": 4.328621908127209e-05, "loss": 2.2732, "step": 70 }, { "epoch": 0.7582938388625592, "grad_norm": 0.8783323168754578, "learning_rate": 4.1519434628975266e-05, "loss": 2.2636, "step": 80 }, { "epoch": 0.8530805687203792, "grad_norm": 0.9178075790405273, "learning_rate": 3.975265017667845e-05, "loss": 2.2455, "step": 90 }, { "epoch": 0.9478672985781991, "grad_norm": 0.6878641843795776, "learning_rate": 3.7985865724381624e-05, "loss": 2.2376, "step": 100 }, { "epoch": 0.995260663507109, "eval_accuracy": 0.2981666666666667, "eval_loss": 2.2017133235931396, "eval_runtime": 24.7638, "eval_samples_per_second": 242.289, "eval_steps_per_second": 1.898, "step": 105 }, { "epoch": 1.042654028436019, "grad_norm": 0.9457613229751587, "learning_rate": 3.621908127208481e-05, "loss": 2.2256, "step": 110 }, { "epoch": 1.1374407582938388, "grad_norm": 0.850648820400238, "learning_rate": 3.445229681978799e-05, "loss": 2.2144, "step": 120 }, { "epoch": 1.2322274881516588, "grad_norm": 0.8362263441085815, "learning_rate": 3.2685512367491163e-05, "loss": 2.2111, "step": 130 }, { "epoch": 1.3270142180094786, "grad_norm": 0.6047300100326538, "learning_rate": 3.0918727915194346e-05, "loss": 2.2002, "step": 140 }, { "epoch": 1.4218009478672986, "grad_norm": 0.9275925755500793, "learning_rate": 2.915194346289753e-05, "loss": 2.1941, "step": 150 }, { "epoch": 1.5165876777251186, "grad_norm": 0.8383633494377136, "learning_rate": 2.738515901060071e-05, "loss": 2.1916, "step": 160 }, { "epoch": 1.6113744075829384, "grad_norm": 0.7653341293334961, "learning_rate": 2.5618374558303885e-05, "loss": 2.1859, "step": 170 }, { "epoch": 1.7061611374407581, "grad_norm": 0.775774359703064, "learning_rate": 2.3851590106007068e-05, "loss": 2.1796, "step": 180 }, { "epoch": 1.8009478672985781, "grad_norm": 0.932090699672699, "learning_rate": 2.2084805653710246e-05, "loss": 2.1708, "step": 190 }, { "epoch": 1.8957345971563981, "grad_norm": 0.9772677421569824, "learning_rate": 2.031802120141343e-05, "loss": 2.1684, "step": 200 }, { "epoch": 1.9905213270142181, "grad_norm": 0.7905783653259277, "learning_rate": 1.855123674911661e-05, "loss": 2.1641, "step": 210 }, { "epoch": 2.0, "eval_accuracy": 0.4041666666666667, "eval_loss": 2.1207876205444336, "eval_runtime": 24.705, "eval_samples_per_second": 242.866, "eval_steps_per_second": 1.902, "step": 211 }, { "epoch": 
2.085308056872038, "grad_norm": 1.0608142614364624, "learning_rate": 1.678445229681979e-05, "loss": 2.1633, "step": 220 }, { "epoch": 2.1800947867298577, "grad_norm": 1.0211544036865234, "learning_rate": 1.501766784452297e-05, "loss": 2.1518, "step": 230 }, { "epoch": 2.2748815165876777, "grad_norm": 0.9483183026313782, "learning_rate": 1.3250883392226149e-05, "loss": 2.1507, "step": 240 }, { "epoch": 2.3696682464454977, "grad_norm": 0.6434796452522278, "learning_rate": 1.148409893992933e-05, "loss": 2.1477, "step": 250 }, { "epoch": 2.4644549763033177, "grad_norm": 0.7338018417358398, "learning_rate": 9.717314487632508e-06, "loss": 2.1439, "step": 260 }, { "epoch": 2.5592417061611377, "grad_norm": 0.8191038966178894, "learning_rate": 7.950530035335689e-06, "loss": 2.1466, "step": 270 }, { "epoch": 2.654028436018957, "grad_norm": 0.8383843898773193, "learning_rate": 6.18374558303887e-06, "loss": 2.1435, "step": 280 }, { "epoch": 2.748815165876777, "grad_norm": 0.7335329055786133, "learning_rate": 4.41696113074205e-06, "loss": 2.1399, "step": 290 }, { "epoch": 2.843601895734597, "grad_norm": 0.9002769589424133, "learning_rate": 2.65017667844523e-06, "loss": 2.142, "step": 300 }, { "epoch": 2.938388625592417, "grad_norm": 0.8580403327941895, "learning_rate": 8.8339222614841e-07, "loss": 2.1453, "step": 310 }, { "epoch": 2.985781990521327, "eval_accuracy": 0.43766666666666665, "eval_loss": 2.0961079597473145, "eval_runtime": 24.9225, "eval_samples_per_second": 240.747, "eval_steps_per_second": 1.886, "step": 315 }, { "epoch": 2.985781990521327, "step": 315, "total_flos": 4.411646023570175e+19, "train_loss": 2.214817982628232, "train_runtime": 948.9642, "train_samples_per_second": 170.712, "train_steps_per_second": 0.332 } ], "logging_steps": 10, "max_steps": 315, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 4.411646023570175e+19, "train_batch_size": 128, "trial_name": null, "trial_params": null }