{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.92436974789916,
"eval_steps": 8,
"global_step": 87,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03361344537815126,
"grad_norm": 1.921875,
"learning_rate": 2e-05,
"loss": 3.1589,
"step": 1
},
{
"epoch": 0.03361344537815126,
"eval_loss": 3.214388847351074,
"eval_runtime": 9.5403,
"eval_samples_per_second": 5.241,
"eval_steps_per_second": 0.734,
"step": 1
},
{
"epoch": 0.06722689075630252,
"grad_norm": 1.8984375,
"learning_rate": 4e-05,
"loss": 3.2696,
"step": 2
},
{
"epoch": 0.10084033613445378,
"grad_norm": 1.7109375,
"learning_rate": 6e-05,
"loss": 3.4298,
"step": 3
},
{
"epoch": 0.13445378151260504,
"grad_norm": 1.84375,
"learning_rate": 8e-05,
"loss": 3.1625,
"step": 4
},
{
"epoch": 0.16806722689075632,
"grad_norm": 1.875,
"learning_rate": 0.0001,
"loss": 3.2503,
"step": 5
},
{
"epoch": 0.20168067226890757,
"grad_norm": 1.7734375,
"learning_rate": 0.00012,
"loss": 3.1762,
"step": 6
},
{
"epoch": 0.23529411764705882,
"grad_norm": 1.3828125,
"learning_rate": 0.00014,
"loss": 2.8709,
"step": 7
},
{
"epoch": 0.2689075630252101,
"grad_norm": 1.171875,
"learning_rate": 0.00016,
"loss": 2.8091,
"step": 8
},
{
"epoch": 0.2689075630252101,
"eval_loss": 2.6286215782165527,
"eval_runtime": 10.6222,
"eval_samples_per_second": 4.707,
"eval_steps_per_second": 0.659,
"step": 8
},
{
"epoch": 0.3025210084033613,
"grad_norm": 1.0078125,
"learning_rate": 0.00018,
"loss": 2.6691,
"step": 9
},
{
"epoch": 0.33613445378151263,
"grad_norm": 0.82421875,
"learning_rate": 0.0002,
"loss": 2.6851,
"step": 10
},
{
"epoch": 0.3697478991596639,
"grad_norm": 0.828125,
"learning_rate": 0.00019995608365087946,
"loss": 2.5119,
"step": 11
},
{
"epoch": 0.40336134453781514,
"grad_norm": 0.65234375,
"learning_rate": 0.00019982437317643217,
"loss": 2.3433,
"step": 12
},
{
"epoch": 0.4369747899159664,
"grad_norm": 0.73046875,
"learning_rate": 0.0001996049842615217,
"loss": 2.3176,
"step": 13
},
{
"epoch": 0.47058823529411764,
"grad_norm": 0.7109375,
"learning_rate": 0.00019929810960135172,
"loss": 2.2936,
"step": 14
},
{
"epoch": 0.5042016806722689,
"grad_norm": 0.7890625,
"learning_rate": 0.0001989040187322164,
"loss": 2.3125,
"step": 15
},
{
"epoch": 0.5378151260504201,
"grad_norm": 0.69921875,
"learning_rate": 0.00019842305779475968,
"loss": 2.312,
"step": 16
},
{
"epoch": 0.5378151260504201,
"eval_loss": 2.242363691329956,
"eval_runtime": 10.6302,
"eval_samples_per_second": 4.704,
"eval_steps_per_second": 0.659,
"step": 16
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.70703125,
"learning_rate": 0.0001978556492299504,
"loss": 2.2591,
"step": 17
},
{
"epoch": 0.6050420168067226,
"grad_norm": 0.70703125,
"learning_rate": 0.0001972022914080411,
"loss": 2.3639,
"step": 18
},
{
"epoch": 0.6386554621848739,
"grad_norm": 0.7734375,
"learning_rate": 0.00019646355819083589,
"loss": 2.2533,
"step": 19
},
{
"epoch": 0.6722689075630253,
"grad_norm": 0.58984375,
"learning_rate": 0.00019564009842765225,
"loss": 2.2628,
"step": 20
},
{
"epoch": 0.7058823529411765,
"grad_norm": 0.7265625,
"learning_rate": 0.00019473263538541914,
"loss": 2.2931,
"step": 21
},
{
"epoch": 0.7394957983193278,
"grad_norm": 0.69140625,
"learning_rate": 0.0001937419661134121,
"loss": 2.2773,
"step": 22
},
{
"epoch": 0.773109243697479,
"grad_norm": 0.7109375,
"learning_rate": 0.00019266896074318334,
"loss": 2.3788,
"step": 23
},
{
"epoch": 0.8067226890756303,
"grad_norm": 0.59375,
"learning_rate": 0.00019151456172430183,
"loss": 2.0133,
"step": 24
},
{
"epoch": 0.8067226890756303,
"eval_loss": 2.153235673904419,
"eval_runtime": 10.6243,
"eval_samples_per_second": 4.706,
"eval_steps_per_second": 0.659,
"step": 24
},
{
"epoch": 0.8403361344537815,
"grad_norm": 0.53515625,
"learning_rate": 0.00019027978299657436,
"loss": 2.086,
"step": 25
},
{
"epoch": 0.8739495798319328,
"grad_norm": 0.70703125,
"learning_rate": 0.00018896570909947475,
"loss": 2.2269,
"step": 26
},
{
"epoch": 0.907563025210084,
"grad_norm": 0.65625,
"learning_rate": 0.0001875734942195637,
"loss": 2.3534,
"step": 27
},
{
"epoch": 0.9411764705882353,
"grad_norm": 0.55078125,
"learning_rate": 0.00018610436117673555,
"loss": 2.1109,
"step": 28
},
{
"epoch": 0.9747899159663865,
"grad_norm": 0.52734375,
"learning_rate": 0.0001845596003501826,
"loss": 2.2316,
"step": 29
},
{
"epoch": 1.0084033613445378,
"grad_norm": 0.66796875,
"learning_rate": 0.0001829405685450202,
"loss": 2.3328,
"step": 30
},
{
"epoch": 1.0420168067226891,
"grad_norm": 0.455078125,
"learning_rate": 0.00018124868780056814,
"loss": 2.1319,
"step": 31
},
{
"epoch": 1.0756302521008403,
"grad_norm": 0.455078125,
"learning_rate": 0.00017948544414133534,
"loss": 2.1417,
"step": 32
},
{
"epoch": 1.0756302521008403,
"eval_loss": 2.1120786666870117,
"eval_runtime": 10.6004,
"eval_samples_per_second": 4.717,
"eval_steps_per_second": 0.66,
"step": 32
},
{
"epoch": 1.1092436974789917,
"grad_norm": 0.359375,
"learning_rate": 0.00017765238627180424,
"loss": 2.1163,
"step": 33
},
{
"epoch": 1.1428571428571428,
"grad_norm": 0.4140625,
"learning_rate": 0.00017575112421616202,
"loss": 2.0358,
"step": 34
},
{
"epoch": 1.1764705882352942,
"grad_norm": 0.353515625,
"learning_rate": 0.00017378332790417273,
"loss": 2.1566,
"step": 35
},
{
"epoch": 1.2100840336134453,
"grad_norm": 0.396484375,
"learning_rate": 0.00017175072570443312,
"loss": 2.1398,
"step": 36
},
{
"epoch": 1.2436974789915967,
"grad_norm": 0.42578125,
"learning_rate": 0.00016965510290629972,
"loss": 2.2475,
"step": 37
},
{
"epoch": 1.2773109243697478,
"grad_norm": 0.37890625,
"learning_rate": 0.00016749830015182107,
"loss": 2.1305,
"step": 38
},
{
"epoch": 1.3109243697478992,
"grad_norm": 0.36328125,
"learning_rate": 0.00016528221181905217,
"loss": 2.2217,
"step": 39
},
{
"epoch": 1.3445378151260505,
"grad_norm": 0.3671875,
"learning_rate": 0.00016300878435817113,
"loss": 2.0591,
"step": 40
},
{
"epoch": 1.3445378151260505,
"eval_loss": 2.0889272689819336,
"eval_runtime": 10.6267,
"eval_samples_per_second": 4.705,
"eval_steps_per_second": 0.659,
"step": 40
},
{
"epoch": 1.3781512605042017,
"grad_norm": 0.375,
"learning_rate": 0.00016068001458185936,
"loss": 2.0942,
"step": 41
},
{
"epoch": 1.4117647058823528,
"grad_norm": 0.396484375,
"learning_rate": 0.0001582979479114472,
"loss": 1.9785,
"step": 42
},
{
"epoch": 1.4453781512605042,
"grad_norm": 0.373046875,
"learning_rate": 0.00015586467658036524,
"loss": 2.2329,
"step": 43
},
{
"epoch": 1.4789915966386555,
"grad_norm": 0.341796875,
"learning_rate": 0.0001533823377964791,
"loss": 1.9181,
"step": 44
},
{
"epoch": 1.5126050420168067,
"grad_norm": 0.353515625,
"learning_rate": 0.00015085311186492206,
"loss": 1.9433,
"step": 45
},
{
"epoch": 1.5462184873949578,
"grad_norm": 0.3671875,
"learning_rate": 0.00014827922027307451,
"loss": 2.0125,
"step": 46
},
{
"epoch": 1.5798319327731094,
"grad_norm": 0.39453125,
"learning_rate": 0.0001456629237393713,
"loss": 2.0676,
"step": 47
},
{
"epoch": 1.6134453781512605,
"grad_norm": 0.369140625,
"learning_rate": 0.00014300652022765207,
"loss": 2.0986,
"step": 48
},
{
"epoch": 1.6134453781512605,
"eval_loss": 2.0764036178588867,
"eval_runtime": 10.6017,
"eval_samples_per_second": 4.716,
"eval_steps_per_second": 0.66,
"step": 48
},
{
"epoch": 1.6470588235294117,
"grad_norm": 0.4375,
"learning_rate": 0.00014031234292879725,
"loss": 2.0797,
"step": 49
},
{
"epoch": 1.680672268907563,
"grad_norm": 0.40234375,
"learning_rate": 0.00013758275821142382,
"loss": 1.8857,
"step": 50
},
{
"epoch": 1.7142857142857144,
"grad_norm": 0.365234375,
"learning_rate": 0.0001348201635434399,
"loss": 2.1623,
"step": 51
},
{
"epoch": 1.7478991596638656,
"grad_norm": 0.390625,
"learning_rate": 0.00013202698538628376,
"loss": 2.0474,
"step": 52
},
{
"epoch": 1.7815126050420167,
"grad_norm": 0.388671875,
"learning_rate": 0.00012920567706369758,
"loss": 2.1801,
"step": 53
},
{
"epoch": 1.815126050420168,
"grad_norm": 0.412109375,
"learning_rate": 0.00012635871660690676,
"loss": 2.1065,
"step": 54
},
{
"epoch": 1.8487394957983194,
"grad_norm": 0.42578125,
"learning_rate": 0.00012348860457809838,
"loss": 2.1388,
"step": 55
},
{
"epoch": 1.8823529411764706,
"grad_norm": 0.4140625,
"learning_rate": 0.00012059786187410984,
"loss": 2.0055,
"step": 56
},
{
"epoch": 1.8823529411764706,
"eval_loss": 2.0758326053619385,
"eval_runtime": 10.585,
"eval_samples_per_second": 4.724,
"eval_steps_per_second": 0.661,
"step": 56
},
{
"epoch": 1.9159663865546217,
"grad_norm": 0.361328125,
"learning_rate": 0.0001176890275122573,
"loss": 1.8341,
"step": 57
},
{
"epoch": 1.949579831932773,
"grad_norm": 0.443359375,
"learning_rate": 0.00011476465640024814,
"loss": 2.1179,
"step": 58
},
{
"epoch": 1.9831932773109244,
"grad_norm": 0.375,
"learning_rate": 0.00011182731709213659,
"loss": 1.937,
"step": 59
},
{
"epoch": 2.0168067226890756,
"grad_norm": 0.431640625,
"learning_rate": 0.00010887958953229349,
"loss": 1.9028,
"step": 60
},
{
"epoch": 2.0504201680672267,
"grad_norm": 0.392578125,
"learning_rate": 0.00010592406278937144,
"loss": 2.1867,
"step": 61
},
{
"epoch": 2.0840336134453783,
"grad_norm": 0.341796875,
"learning_rate": 0.00010296333278225599,
"loss": 2.0392,
"step": 62
},
{
"epoch": 2.1176470588235294,
"grad_norm": 0.3671875,
"learning_rate": 0.0001,
"loss": 2.0139,
"step": 63
},
{
"epoch": 2.1512605042016806,
"grad_norm": 0.38671875,
"learning_rate": 9.703666721774402e-05,
"loss": 1.8986,
"step": 64
},
{
"epoch": 2.1512605042016806,
"eval_loss": 2.070317029953003,
"eval_runtime": 10.6071,
"eval_samples_per_second": 4.714,
"eval_steps_per_second": 0.66,
"step": 64
},
{
"epoch": 2.184873949579832,
"grad_norm": 0.37109375,
"learning_rate": 9.407593721062859e-05,
"loss": 1.9843,
"step": 65
},
{
"epoch": 2.2184873949579833,
"grad_norm": 0.3671875,
"learning_rate": 9.112041046770653e-05,
"loss": 1.951,
"step": 66
},
{
"epoch": 2.2521008403361344,
"grad_norm": 0.396484375,
"learning_rate": 8.817268290786343e-05,
"loss": 1.9603,
"step": 67
},
{
"epoch": 2.2857142857142856,
"grad_norm": 0.35546875,
"learning_rate": 8.523534359975189e-05,
"loss": 1.9064,
"step": 68
},
{
"epoch": 2.3193277310924367,
"grad_norm": 0.36328125,
"learning_rate": 8.231097248774274e-05,
"loss": 1.8797,
"step": 69
},
{
"epoch": 2.3529411764705883,
"grad_norm": 0.345703125,
"learning_rate": 7.940213812589018e-05,
"loss": 1.8777,
"step": 70
},
{
"epoch": 2.3865546218487395,
"grad_norm": 0.37109375,
"learning_rate": 7.651139542190164e-05,
"loss": 1.9171,
"step": 71
},
{
"epoch": 2.4201680672268906,
"grad_norm": 0.390625,
"learning_rate": 7.364128339309326e-05,
"loss": 1.9346,
"step": 72
},
{
"epoch": 2.4201680672268906,
"eval_loss": 2.0701420307159424,
"eval_runtime": 10.6433,
"eval_samples_per_second": 4.698,
"eval_steps_per_second": 0.658,
"step": 72
},
{
"epoch": 2.453781512605042,
"grad_norm": 0.412109375,
"learning_rate": 7.079432293630244e-05,
"loss": 1.9893,
"step": 73
},
{
"epoch": 2.4873949579831933,
"grad_norm": 0.404296875,
"learning_rate": 6.797301461371625e-05,
"loss": 1.8753,
"step": 74
},
{
"epoch": 2.5210084033613445,
"grad_norm": 0.435546875,
"learning_rate": 6.517983645656014e-05,
"loss": 1.8117,
"step": 75
},
{
"epoch": 2.5546218487394956,
"grad_norm": 0.3828125,
"learning_rate": 6.24172417885762e-05,
"loss": 1.9694,
"step": 76
},
{
"epoch": 2.588235294117647,
"grad_norm": 0.38671875,
"learning_rate": 5.96876570712028e-05,
"loss": 1.9187,
"step": 77
},
{
"epoch": 2.6218487394957983,
"grad_norm": 0.376953125,
"learning_rate": 5.699347977234799e-05,
"loss": 1.896,
"step": 78
},
{
"epoch": 2.6554621848739495,
"grad_norm": 0.3828125,
"learning_rate": 5.43370762606287e-05,
"loss": 2.0854,
"step": 79
},
{
"epoch": 2.689075630252101,
"grad_norm": 0.39453125,
"learning_rate": 5.172077972692553e-05,
"loss": 2.0248,
"step": 80
},
{
"epoch": 2.689075630252101,
"eval_loss": 2.0724899768829346,
"eval_runtime": 10.617,
"eval_samples_per_second": 4.709,
"eval_steps_per_second": 0.659,
"step": 80
},
{
"epoch": 2.722689075630252,
"grad_norm": 0.345703125,
"learning_rate": 4.914688813507797e-05,
"loss": 1.9837,
"step": 81
},
{
"epoch": 2.7563025210084033,
"grad_norm": 0.39453125,
"learning_rate": 4.661766220352097e-05,
"loss": 2.0834,
"step": 82
},
{
"epoch": 2.7899159663865545,
"grad_norm": 0.380859375,
"learning_rate": 4.4135323419634766e-05,
"loss": 1.8418,
"step": 83
},
{
"epoch": 2.8235294117647056,
"grad_norm": 0.408203125,
"learning_rate": 4.170205208855281e-05,
"loss": 1.9686,
"step": 84
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.4140625,
"learning_rate": 3.931998541814069e-05,
"loss": 1.9098,
"step": 85
},
{
"epoch": 2.8907563025210083,
"grad_norm": 0.43359375,
"learning_rate": 3.69912156418289e-05,
"loss": 1.9907,
"step": 86
},
{
"epoch": 2.92436974789916,
"grad_norm": 0.412109375,
"learning_rate": 3.471778818094785e-05,
"loss": 2.0406,
"step": 87
}
],
"logging_steps": 1,
"max_steps": 116,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 29,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5464004832264192.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}