{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.285714285714286,
"eval_steps": 500,
"global_step": 18,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.57,
"grad_norm": 0.2423473596572876,
"learning_rate": 0.0001,
"loss": 4.0857,
"step": 1
},
{
"epoch": 1.14,
"grad_norm": 0.2876349985599518,
"learning_rate": 0.0002,
"loss": 4.396,
"step": 2
},
{
"epoch": 1.71,
"grad_norm": 0.25892937183380127,
"learning_rate": 0.0001875,
"loss": 4.257,
"step": 3
},
{
"epoch": 2.29,
"grad_norm": 0.24695058166980743,
"learning_rate": 0.000175,
"loss": 4.2171,
"step": 4
},
{
"epoch": 2.86,
"grad_norm": 0.439170777797699,
"learning_rate": 0.00016250000000000002,
"loss": 4.0037,
"step": 5
},
{
"epoch": 3.43,
"grad_norm": 0.3510642647743225,
"learning_rate": 0.00015000000000000001,
"loss": 4.1377,
"step": 6
},
{
"epoch": 4.0,
"grad_norm": 0.3439651131629944,
"learning_rate": 0.0001375,
"loss": 4.0279,
"step": 7
},
{
"epoch": 4.57,
"grad_norm": 0.3093964159488678,
"learning_rate": 0.000125,
"loss": 3.8389,
"step": 8
},
{
"epoch": 5.14,
"grad_norm": 0.3536764979362488,
"learning_rate": 0.00011250000000000001,
"loss": 4.1566,
"step": 9
},
{
"epoch": 5.71,
"grad_norm": 0.3965250849723816,
"learning_rate": 0.0001,
"loss": 3.969,
"step": 10
},
{
"epoch": 6.29,
"grad_norm": 0.4037889838218689,
"learning_rate": 8.75e-05,
"loss": 3.9695,
"step": 11
},
{
"epoch": 6.86,
"grad_norm": 0.3280973434448242,
"learning_rate": 7.500000000000001e-05,
"loss": 3.7841,
"step": 12
},
{
"epoch": 7.43,
"grad_norm": 0.3607095777988434,
"learning_rate": 6.25e-05,
"loss": 3.8726,
"step": 13
},
{
"epoch": 8.0,
"grad_norm": 0.3536747992038727,
"learning_rate": 5e-05,
"loss": 3.8848,
"step": 14
},
{
"epoch": 8.57,
"grad_norm": 0.42837923765182495,
"learning_rate": 3.7500000000000003e-05,
"loss": 3.8312,
"step": 15
},
{
"epoch": 9.14,
"grad_norm": 0.3861906826496124,
"learning_rate": 2.5e-05,
"loss": 3.9284,
"step": 16
},
{
"epoch": 9.71,
"grad_norm": 0.3450121581554413,
"learning_rate": 1.25e-05,
"loss": 3.8218,
"step": 17
},
{
"epoch": 10.29,
"grad_norm": 0.3705593943595886,
"learning_rate": 0.0,
"loss": 3.726,
"step": 18
}
],
"logging_steps": 1,
"max_steps": 18,
"num_input_tokens_seen": 0,
"num_train_epochs": 18,
"save_steps": 500,
"total_flos": 5730050919936000.0,
"train_batch_size": 5,
"trial_name": null,
"trial_params": null
}