{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 15655,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.15969338869370808,
"grad_norm": 0.4899541735649109,
"learning_rate": 2.910507824976046e-06,
"loss": 5.8222,
"step": 500
},
{
"epoch": 0.31938677738741617,
"grad_norm": 0.08963651955127716,
"learning_rate": 2.8146917917598215e-06,
"loss": 4.9707,
"step": 1000
},
{
"epoch": 0.47908016608112425,
"grad_norm": 0.05247149616479874,
"learning_rate": 2.7188757585435964e-06,
"loss": 4.9317,
"step": 1500
},
{
"epoch": 0.6387735547748323,
"grad_norm": 0.05341716483235359,
"learning_rate": 2.6230597253273713e-06,
"loss": 4.9183,
"step": 2000
},
{
"epoch": 0.7984669434685404,
"grad_norm": 0.07143773138523102,
"learning_rate": 2.5272436921111466e-06,
"loss": 4.8527,
"step": 2500
},
{
"epoch": 0.9581603321622485,
"grad_norm": 0.0,
"learning_rate": 2.431427658894922e-06,
"loss": 4.8384,
"step": 3000
},
{
"epoch": 1.1178537208559565,
"grad_norm": 0.041869938373565674,
"learning_rate": 2.335611625678697e-06,
"loss": 4.8368,
"step": 3500
},
{
"epoch": 1.2775471095496647,
"grad_norm": 0.0,
"learning_rate": 2.239795592462472e-06,
"loss": 4.8367,
"step": 4000
},
{
"epoch": 1.4372404982433729,
"grad_norm": 0.0,
"learning_rate": 2.1441711913126796e-06,
"loss": 4.8323,
"step": 4500
},
{
"epoch": 1.5969338869370808,
"grad_norm": 0.0,
"learning_rate": 2.048355158096455e-06,
"loss": 4.8311,
"step": 5000
},
{
"epoch": 1.7566272756307888,
"grad_norm": 0.0,
"learning_rate": 1.9525391248802303e-06,
"loss": 4.8316,
"step": 5500
},
{
"epoch": 1.916320664324497,
"grad_norm": 0.018847908824682236,
"learning_rate": 1.856723091664005e-06,
"loss": 4.8319,
"step": 6000
},
{
"epoch": 2.076014053018205,
"grad_norm": 0.0,
"learning_rate": 1.7610986905142128e-06,
"loss": 4.8327,
"step": 6500
},
{
"epoch": 2.235707441711913,
"grad_norm": 0.06726948916912079,
"learning_rate": 1.665282657297988e-06,
"loss": 4.8334,
"step": 7000
},
{
"epoch": 2.395400830405621,
"grad_norm": 0.0,
"learning_rate": 1.5694666240817628e-06,
"loss": 4.8337,
"step": 7500
},
{
"epoch": 2.5550942190993293,
"grad_norm": 0.0,
"learning_rate": 1.4736505908655382e-06,
"loss": 4.8333,
"step": 8000
},
{
"epoch": 2.7147876077930375,
"grad_norm": 0.0,
"learning_rate": 1.3780261897157458e-06,
"loss": 4.8341,
"step": 8500
},
{
"epoch": 2.8744809964867457,
"grad_norm": 0.0,
"learning_rate": 1.282210156499521e-06,
"loss": 4.8345,
"step": 9000
},
{
"epoch": 3.0341743851804535,
"grad_norm": 0.0,
"learning_rate": 1.1863941232832963e-06,
"loss": 4.8347,
"step": 9500
},
{
"epoch": 3.1938677738741617,
"grad_norm": 0.0,
"learning_rate": 1.0905780900670712e-06,
"loss": 4.8344,
"step": 10000
},
{
"epoch": 3.35356116256787,
"grad_norm": 0.0715227872133255,
"learning_rate": 9.947620568508465e-07,
"loss": 4.8346,
"step": 10500
},
{
"epoch": 3.5132545512615776,
"grad_norm": 0.0,
"learning_rate": 8.99137655701054e-07,
"loss": 4.8356,
"step": 11000
},
{
"epoch": 3.672947939955286,
"grad_norm": 0.0,
"learning_rate": 8.033216224848291e-07,
"loss": 4.8365,
"step": 11500
},
{
"epoch": 3.832641328648994,
"grad_norm": 0.0,
"learning_rate": 7.075055892686044e-07,
"loss": 4.8364,
"step": 12000
},
{
"epoch": 3.992334717342702,
"grad_norm": 0.0,
"learning_rate": 6.116895560523795e-07,
"loss": 4.8363,
"step": 12500
},
{
"epoch": 4.15202810603641,
"grad_norm": 0.0428263284265995,
"learning_rate": 5.158735228361546e-07,
"loss": 4.8361,
"step": 13000
},
{
"epoch": 4.311721494730119,
"grad_norm": 0.024692021310329437,
"learning_rate": 4.200574896199297e-07,
"loss": 4.8366,
"step": 13500
},
{
"epoch": 4.471414883423826,
"grad_norm": 0.009164220653474331,
"learning_rate": 3.242414564037049e-07,
"loss": 4.8368,
"step": 14000
},
{
"epoch": 4.631108272117534,
"grad_norm": 0.0,
"learning_rate": 2.286170552539125e-07,
"loss": 4.8366,
"step": 14500
},
{
"epoch": 4.790801660811242,
"grad_norm": 0.0,
"learning_rate": 1.3280102203768765e-07,
"loss": 4.837,
"step": 15000
},
{
"epoch": 4.9504950495049505,
"grad_norm": 0.0,
"learning_rate": 3.698498882146279e-08,
"loss": 4.837,
"step": 15500
}
],
"logging_steps": 500,
"max_steps": 15655,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}