DPO-ONLY-Zephyr-7B / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016736401673640166,
      "grad_norm": 7.773524852103462,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": -2.944267749786377,
      "logits/rejected": -2.9508228302001953,
      "logps/chosen": -334.3809814453125,
      "logps/rejected": -278.32745361328125,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16736401673640167,
      "grad_norm": 7.455984781222514,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": -2.7712457180023193,
      "logits/rejected": -2.757575273513794,
      "logps/chosen": -283.988037109375,
      "logps/rejected": -264.9224853515625,
      "loss": 0.6914,
      "rewards/accuracies": 0.5902777910232544,
      "rewards/chosen": 0.006971749942749739,
      "rewards/margins": 0.005563115235418081,
      "rewards/rejected": 0.001408634358085692,
      "step": 10
    },
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 6.713963977899909,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": -2.806164503097534,
      "logits/rejected": -2.775848388671875,
      "logps/chosen": -266.1190490722656,
      "logps/rejected": -247.2109375,
      "loss": 0.6745,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": 0.02572602964937687,
      "rewards/margins": 0.03478793427348137,
      "rewards/rejected": -0.009061903692781925,
      "step": 20
    },
    {
      "epoch": 0.502092050209205,
      "grad_norm": 7.399465873915044,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": -2.8269295692443848,
      "logits/rejected": -2.7941997051239014,
      "logps/chosen": -317.8218994140625,
      "logps/rejected": -263.57781982421875,
      "loss": 0.6486,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.01709526591002941,
      "rewards/margins": 0.13319885730743408,
      "rewards/rejected": -0.15029413998126984,
      "step": 30
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 8.176633194787062,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": -2.807521343231201,
      "logits/rejected": -2.792980670928955,
      "logps/chosen": -263.8292541503906,
      "logps/rejected": -256.5360412597656,
      "loss": 0.6354,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.13311174511909485,
      "rewards/margins": 0.14099790155887604,
      "rewards/rejected": -0.2741096615791321,
      "step": 40
    },
    {
      "epoch": 0.8368200836820083,
      "grad_norm": 8.613420602924664,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": -2.7687416076660156,
      "logits/rejected": -2.7322001457214355,
      "logps/chosen": -278.05157470703125,
      "logps/rejected": -263.0083312988281,
      "loss": 0.6201,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -0.1905030906200409,
      "rewards/margins": 0.12117175757884979,
      "rewards/rejected": -0.3116748332977295,
      "step": 50
    },
    {
      "epoch": 0.9874476987447699,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.6464958999116542,
      "train_runtime": 1059.0582,
      "train_samples_per_second": 14.431,
      "train_steps_per_second": 0.056
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}