{
"best_metric": 1.6840696334838867,
"best_model_checkpoint": "saves/Gemma-2B/lora/train_2024-03-01-04-36-32/checkpoint-400",
"epoch": 0.7111111111111111,
"eval_steps": 100,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 6.6079277992248535,
"learning_rate": 4.999960939662063e-05,
"loss": 3.747,
"step": 10
},
{
"epoch": 0.04,
"grad_norm": 3.2283411026000977,
"learning_rate": 4.999843759868819e-05,
"loss": 3.5789,
"step": 20
},
{
"epoch": 0.05,
"grad_norm": 41.573001861572266,
"learning_rate": 4.999648464281934e-05,
"loss": 3.1683,
"step": 30
},
{
"epoch": 0.07,
"grad_norm": 4.080965518951416,
"learning_rate": 4.9993750590040575e-05,
"loss": 2.8275,
"step": 40
},
{
"epoch": 0.09,
"grad_norm": 4.576275825500488,
"learning_rate": 4.999023552578632e-05,
"loss": 2.6758,
"step": 50
},
{
"epoch": 0.11,
"grad_norm": 18.012842178344727,
"learning_rate": 4.998593955989626e-05,
"loss": 2.6287,
"step": 60
},
{
"epoch": 0.12,
"grad_norm": 5.738934516906738,
"learning_rate": 4.9980862826611875e-05,
"loss": 2.5284,
"step": 70
},
{
"epoch": 0.14,
"grad_norm": 3.353776216506958,
"learning_rate": 4.9975005484572305e-05,
"loss": 2.2608,
"step": 80
},
{
"epoch": 0.16,
"grad_norm": 4.6298699378967285,
"learning_rate": 4.9968367716809374e-05,
"loss": 2.2475,
"step": 90
},
{
"epoch": 0.18,
"grad_norm": 50.594207763671875,
"learning_rate": 4.996094973074183e-05,
"loss": 2.2007,
"step": 100
},
{
"epoch": 0.18,
"eval_loss": 2.126384735107422,
"eval_runtime": 124.9221,
"eval_samples_per_second": 8.005,
"eval_steps_per_second": 2.001,
"step": 100
},
{
"epoch": 0.2,
"grad_norm": 10.225520133972168,
"learning_rate": 4.995275175816891e-05,
"loss": 1.9414,
"step": 110
},
{
"epoch": 0.21,
"grad_norm": 4.777626991271973,
"learning_rate": 4.994377405526308e-05,
"loss": 1.9729,
"step": 120
},
{
"epoch": 0.23,
"grad_norm": 6.133576393127441,
"learning_rate": 4.993401690256203e-05,
"loss": 2.0237,
"step": 130
},
{
"epoch": 0.25,
"grad_norm": 5.396271228790283,
"learning_rate": 4.992348060495989e-05,
"loss": 2.009,
"step": 140
},
{
"epoch": 0.27,
"grad_norm": 3.4974453449249268,
"learning_rate": 4.991216549169776e-05,
"loss": 2.032,
"step": 150
},
{
"epoch": 0.28,
"grad_norm": 12.256199836730957,
"learning_rate": 4.990007191635334e-05,
"loss": 1.9548,
"step": 160
},
{
"epoch": 0.3,
"grad_norm": 7.5634379386901855,
"learning_rate": 4.988720025682995e-05,
"loss": 1.8164,
"step": 170
},
{
"epoch": 0.32,
"grad_norm": 14.023727416992188,
"learning_rate": 4.987355091534468e-05,
"loss": 1.8517,
"step": 180
},
{
"epoch": 0.34,
"grad_norm": 4.622091293334961,
"learning_rate": 4.985912431841584e-05,
"loss": 2.0255,
"step": 190
},
{
"epoch": 0.36,
"grad_norm": 3.9935083389282227,
"learning_rate": 4.9843920916849645e-05,
"loss": 1.8777,
"step": 200
},
{
"epoch": 0.36,
"eval_loss": 1.8619400262832642,
"eval_runtime": 124.8712,
"eval_samples_per_second": 8.008,
"eval_steps_per_second": 2.002,
"step": 200
},
{
"epoch": 0.37,
"grad_norm": 6.256485939025879,
"learning_rate": 4.982794118572609e-05,
"loss": 1.8885,
"step": 210
},
{
"epoch": 0.39,
"grad_norm": 13.212824821472168,
"learning_rate": 4.981118562438414e-05,
"loss": 1.7744,
"step": 220
},
{
"epoch": 0.41,
"grad_norm": 4.2626118659973145,
"learning_rate": 4.9793654756406085e-05,
"loss": 1.7545,
"step": 230
},
{
"epoch": 0.43,
"grad_norm": 4.217405796051025,
"learning_rate": 4.9775349129601243e-05,
"loss": 1.5633,
"step": 240
},
{
"epoch": 0.44,
"grad_norm": 22.393404006958008,
"learning_rate": 4.9756269315988804e-05,
"loss": 1.8871,
"step": 250
},
{
"epoch": 0.46,
"grad_norm": 3.6576473712921143,
"learning_rate": 4.973641591177991e-05,
"loss": 1.7037,
"step": 260
},
{
"epoch": 0.48,
"grad_norm": 4.2433271408081055,
"learning_rate": 4.971578953735912e-05,
"loss": 1.7631,
"step": 270
},
{
"epoch": 0.5,
"grad_norm": 3.7399721145629883,
"learning_rate": 4.969439083726496e-05,
"loss": 1.7714,
"step": 280
},
{
"epoch": 0.52,
"grad_norm": 4.575680255889893,
"learning_rate": 4.967222048016979e-05,
"loss": 1.8699,
"step": 290
},
{
"epoch": 0.53,
"grad_norm": 7.729683876037598,
"learning_rate": 4.964927915885893e-05,
"loss": 1.6566,
"step": 300
},
{
"epoch": 0.53,
"eval_loss": 1.7350378036499023,
"eval_runtime": 124.9278,
"eval_samples_per_second": 8.005,
"eval_steps_per_second": 2.001,
"step": 300
},
{
"epoch": 0.55,
"grad_norm": 2.755899667739868,
"learning_rate": 4.962556759020898e-05,
"loss": 1.7193,
"step": 310
},
{
"epoch": 0.57,
"grad_norm": 3.513024091720581,
"learning_rate": 4.960108651516545e-05,
"loss": 1.852,
"step": 320
},
{
"epoch": 0.59,
"grad_norm": 3.7794790267944336,
"learning_rate": 4.9575836698719605e-05,
"loss": 1.6785,
"step": 330
},
{
"epoch": 0.6,
"grad_norm": 3.2256739139556885,
"learning_rate": 4.954981892988451e-05,
"loss": 1.6648,
"step": 340
},
{
"epoch": 0.62,
"grad_norm": 2.8756954669952393,
"learning_rate": 4.952303402167047e-05,
"loss": 1.6399,
"step": 350
},
{
"epoch": 0.64,
"grad_norm": 7.057961463928223,
"learning_rate": 4.949548281105951e-05,
"loss": 1.5875,
"step": 360
},
{
"epoch": 0.66,
"grad_norm": 4.63081169128418,
"learning_rate": 4.946716615897932e-05,
"loss": 1.6708,
"step": 370
},
{
"epoch": 0.68,
"grad_norm": 8.755204200744629,
"learning_rate": 4.943808495027631e-05,
"loss": 1.636,
"step": 380
},
{
"epoch": 0.69,
"grad_norm": 10.21866226196289,
"learning_rate": 4.940824009368793e-05,
"loss": 1.5714,
"step": 390
},
{
"epoch": 0.71,
"grad_norm": 5.44133186340332,
"learning_rate": 4.937763252181434e-05,
"loss": 1.4084,
"step": 400
},
{
"epoch": 0.71,
"eval_loss": 1.6840696334838867,
"eval_runtime": 124.8851,
"eval_samples_per_second": 8.007,
"eval_steps_per_second": 2.002,
"step": 400
}
],
"logging_steps": 10,
"max_steps": 5620,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 100,
"total_flos": 3.103976541168599e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}