{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.3496014543420501, "eval_steps": 5000, "global_step": 5000, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.006992029086841001, "grad_norm": 75.90129852294922, "learning_rate": 6.780845858091577e-07, "loss": 16.865, "step": 100 }, { "epoch": 0.013984058173682002, "grad_norm": 71.16612243652344, "learning_rate": 1.3771408598392172e-06, "loss": 16.1556, "step": 200 }, { "epoch": 0.020976087260523003, "grad_norm": 130.67613220214844, "learning_rate": 2.0761971338692767e-06, "loss": 14.8008, "step": 300 }, { "epoch": 0.027968116347364003, "grad_norm": 36.4179801940918, "learning_rate": 2.7682628451590356e-06, "loss": 12.4025, "step": 400 }, { "epoch": 0.034960145434205, "grad_norm": 34.748844146728516, "learning_rate": 3.467319119189095e-06, "loss": 9.7465, "step": 500 }, { "epoch": 0.04195217452104601, "grad_norm": 8.039146423339844, "learning_rate": 4.166375393219155e-06, "loss": 8.448, "step": 600 }, { "epoch": 0.04894420360788701, "grad_norm": 10.288451194763184, "learning_rate": 4.865431667249214e-06, "loss": 8.1951, "step": 700 }, { "epoch": 0.05593623269472801, "grad_norm": 6.3236308097839355, "learning_rate": 5.564487941279273e-06, "loss": 8.1093, "step": 800 }, { "epoch": 0.06292826178156902, "grad_norm": 15.230368614196777, "learning_rate": 6.263544215309333e-06, "loss": 8.0567, "step": 900 }, { "epoch": 0.06992029086841, "grad_norm": 5.573168754577637, "learning_rate": 6.962600489339393e-06, "loss": 8.0401, "step": 1000 }, { "epoch": 0.07691231995525101, "grad_norm": 6.7740325927734375, "learning_rate": 7.661656763369451e-06, "loss": 7.9491, "step": 1100 }, { "epoch": 0.08390434904209201, "grad_norm": 6.167610168457031, "learning_rate": 8.360713037399512e-06, "loss": 7.9494, "step": 1200 }, { "epoch": 0.09089637812893302, "grad_norm": 5.53561544418335, "learning_rate": 9.05976931142957e-06, "loss": 7.9386, "step": 1300 }, { "epoch": 0.09788840721577402, "grad_norm": 6.056953430175781, "learning_rate": 9.758825585459631e-06, "loss": 7.9033, "step": 1400 }, { "epoch": 0.10488043630261502, "grad_norm": 5.160706043243408, "learning_rate": 1.045788185948969e-05, "loss": 7.9055, "step": 1500 }, { "epoch": 0.11187246538945601, "grad_norm": 5.525616645812988, "learning_rate": 1.115693813351975e-05, "loss": 7.9203, "step": 1600 }, { "epoch": 0.11886449447629702, "grad_norm": 5.553597927093506, "learning_rate": 1.185599440754981e-05, "loss": 7.8381, "step": 1700 }, { "epoch": 0.12585652356313803, "grad_norm": 4.8832478523254395, "learning_rate": 1.2555050681579868e-05, "loss": 7.8679, "step": 1800 }, { "epoch": 0.13284855264997902, "grad_norm": 5.308164596557617, "learning_rate": 1.3254106955609927e-05, "loss": 7.8686, "step": 1900 }, { "epoch": 0.13984058173682, "grad_norm": 6.790646553039551, "learning_rate": 1.3953163229639988e-05, "loss": 7.8252, "step": 2000 }, { "epoch": 0.14683261082366103, "grad_norm": 5.003779888153076, "learning_rate": 1.4652219503670046e-05, "loss": 7.856, "step": 2100 }, { "epoch": 0.15382463991050202, "grad_norm": 5.0926642417907715, "learning_rate": 1.5351275777700107e-05, "loss": 7.8301, "step": 2200 }, { "epoch": 0.16081666899734304, "grad_norm": 4.154193878173828, "learning_rate": 1.6050332051730168e-05, "loss": 7.8595, "step": 2300 }, { "epoch": 0.16780869808418403, "grad_norm": 4.168135166168213, "learning_rate": 1.6749388325760225e-05, "loss": 7.8138, "step": 2400 }, { "epoch": 0.17480072717102504, "grad_norm": 
4.69076681137085, "learning_rate": 1.7448444599790285e-05, "loss": 7.812, "step": 2500 }, { "epoch": 0.18179275625786603, "grad_norm": 4.386991500854492, "learning_rate": 1.8147500873820346e-05, "loss": 7.8261, "step": 2600 }, { "epoch": 0.18878478534470702, "grad_norm": 5.777139186859131, "learning_rate": 1.8846557147850403e-05, "loss": 7.7988, "step": 2700 }, { "epoch": 0.19577681443154804, "grad_norm": 18.3277645111084, "learning_rate": 1.9545613421880464e-05, "loss": 7.7965, "step": 2800 }, { "epoch": 0.20276884351838903, "grad_norm": 4.5356669425964355, "learning_rate": 1.997280814201919e-05, "loss": 7.783, "step": 2900 }, { "epoch": 0.20976087260523005, "grad_norm": 5.4392476081848145, "learning_rate": 1.9895117119216877e-05, "loss": 7.7752, "step": 3000 }, { "epoch": 0.21675290169207104, "grad_norm": 5.365427017211914, "learning_rate": 1.981742609641456e-05, "loss": 7.7715, "step": 3100 }, { "epoch": 0.22374493077891203, "grad_norm": 4.111672401428223, "learning_rate": 1.9739735073612244e-05, "loss": 7.7903, "step": 3200 }, { "epoch": 0.23073695986575304, "grad_norm": 4.107003211975098, "learning_rate": 1.9662044050809928e-05, "loss": 7.7656, "step": 3300 }, { "epoch": 0.23772898895259403, "grad_norm": 4.667971611022949, "learning_rate": 1.9584353028007615e-05, "loss": 7.749, "step": 3400 }, { "epoch": 0.24472101803943505, "grad_norm": 4.010958194732666, "learning_rate": 1.95066620052053e-05, "loss": 7.7662, "step": 3500 }, { "epoch": 0.25171304712627607, "grad_norm": 4.07546329498291, "learning_rate": 1.9428970982402986e-05, "loss": 7.7492, "step": 3600 }, { "epoch": 0.25870507621311706, "grad_norm": 5.414000988006592, "learning_rate": 1.935127995960067e-05, "loss": 7.737, "step": 3700 }, { "epoch": 0.26569710529995805, "grad_norm": 4.4744648933410645, "learning_rate": 1.9273588936798357e-05, "loss": 7.7232, "step": 3800 }, { "epoch": 0.27268913438679904, "grad_norm": 4.141202926635742, "learning_rate": 1.919589791399604e-05, "loss": 7.7616, "step": 3900 }, { "epoch": 0.27968116347364, "grad_norm": 4.493043899536133, "learning_rate": 1.9118206891193724e-05, "loss": 7.7391, "step": 4000 }, { "epoch": 0.28667319256048107, "grad_norm": 4.083165168762207, "learning_rate": 1.904051586839141e-05, "loss": 7.7552, "step": 4100 }, { "epoch": 0.29366522164732206, "grad_norm": 8.303427696228027, "learning_rate": 1.8962824845589095e-05, "loss": 7.7273, "step": 4200 }, { "epoch": 0.30065725073416305, "grad_norm": 4.003914833068848, "learning_rate": 1.888513382278678e-05, "loss": 7.7216, "step": 4300 }, { "epoch": 0.30764927982100404, "grad_norm": 4.859315395355225, "learning_rate": 1.8807442799984462e-05, "loss": 7.7371, "step": 4400 }, { "epoch": 0.31464130890784503, "grad_norm": 4.693440914154053, "learning_rate": 1.872975177718215e-05, "loss": 7.7426, "step": 4500 }, { "epoch": 0.3216333379946861, "grad_norm": 4.81652307510376, "learning_rate": 1.8652060754379833e-05, "loss": 7.7406, "step": 4600 }, { "epoch": 0.32862536708152706, "grad_norm": 3.861663341522217, "learning_rate": 1.8574369731577516e-05, "loss": 7.712, "step": 4700 }, { "epoch": 0.33561739616836805, "grad_norm": 4.218888282775879, "learning_rate": 1.8496678708775203e-05, "loss": 7.7466, "step": 4800 }, { "epoch": 0.34260942525520904, "grad_norm": 8.032899856567383, "learning_rate": 1.8418987685972887e-05, "loss": 7.7058, "step": 4900 }, { "epoch": 0.3496014543420501, "grad_norm": 4.292692184448242, "learning_rate": 1.8342073573398596e-05, "loss": 7.7139, "step": 5000 }, { "epoch": 0.3496014543420501, "eval_loss": 
7.6895527839660645, "eval_runtime": 261.6657, "eval_samples_per_second": 1752.27, "eval_steps_per_second": 13.693, "step": 5000 } ], "logging_steps": 100, "max_steps": 28604, "num_input_tokens_seen": 0, "num_train_epochs": 2, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 0.0, "train_batch_size": 128, "trial_name": null, "trial_params": null }