{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016736401673640166,
      "grad_norm": 7.774161688327495,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": -2.807276487350464,
      "logits/rejected": -2.7759768962860107,
      "logps/chosen": -315.42626953125,
      "logps/rejected": -227.5915985107422,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16736401673640167,
      "grad_norm": 7.451268268455854,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": -2.7555460929870605,
      "logits/rejected": -2.7461297512054443,
      "logps/chosen": -271.95458984375,
      "logps/rejected": -260.7107849121094,
      "loss": 0.6914,
      "rewards/accuracies": 0.5347222089767456,
      "rewards/chosen": 0.005153654608875513,
      "rewards/margins": 0.0036522927694022655,
      "rewards/rejected": 0.001501361490227282,
      "step": 10
    },
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 6.7050372954284905,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": -2.7869606018066406,
      "logits/rejected": -2.76808762550354,
      "logps/chosen": -261.46710205078125,
      "logps/rejected": -250.8916015625,
      "loss": 0.6746,
      "rewards/accuracies": 0.690625011920929,
      "rewards/chosen": 0.032145868986845016,
      "rewards/margins": 0.04063734412193298,
      "rewards/rejected": -0.008491471409797668,
      "step": 20
    },
    {
      "epoch": 0.502092050209205,
      "grad_norm": 7.445996241869226,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": -2.7877469062805176,
      "logits/rejected": -2.768691062927246,
      "logps/chosen": -293.81890869140625,
      "logps/rejected": -254.21170043945312,
      "loss": 0.6486,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.023628313094377518,
      "rewards/margins": 0.12690803408622742,
      "rewards/rejected": -0.15053634345531464,
      "step": 30
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 8.132911479301741,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": -2.7604308128356934,
      "logits/rejected": -2.7378690242767334,
      "logps/chosen": -264.1637878417969,
      "logps/rejected": -255.6041259765625,
      "loss": 0.6353,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.11244319379329681,
      "rewards/margins": 0.16049158573150635,
      "rewards/rejected": -0.27293476462364197,
      "step": 40
    },
    {
      "epoch": 0.8368200836820083,
      "grad_norm": 8.515463629957392,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": -2.7702841758728027,
      "logits/rejected": -2.750366449356079,
      "logps/chosen": -279.08843994140625,
      "logps/rejected": -287.2726745605469,
      "loss": 0.6201,
      "rewards/accuracies": 0.628125011920929,
      "rewards/chosen": -0.22079789638519287,
      "rewards/margins": 0.13680399954319,
      "rewards/rejected": -0.35760191082954407,
      "step": 50
    },
    {
      "epoch": 0.9874476987447699,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.649011991791806,
      "train_runtime": 1630.9121,
      "train_samples_per_second": 9.371,
      "train_steps_per_second": 0.036
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}