{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.899159663865546,
"eval_steps": 8,
"global_step": 116,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03361344537815126,
"grad_norm": 0.115234375,
"learning_rate": 2e-05,
"loss": 1.768,
"step": 1
},
{
"epoch": 0.03361344537815126,
"eval_loss": 1.8648816347122192,
"eval_runtime": 18.2501,
"eval_samples_per_second": 2.74,
"eval_steps_per_second": 0.384,
"step": 1
},
{
"epoch": 0.06722689075630252,
"grad_norm": 0.10888671875,
"learning_rate": 4e-05,
"loss": 1.7838,
"step": 2
},
{
"epoch": 0.10084033613445378,
"grad_norm": 0.126953125,
"learning_rate": 6e-05,
"loss": 1.9413,
"step": 3
},
{
"epoch": 0.13445378151260504,
"grad_norm": 0.0986328125,
"learning_rate": 8e-05,
"loss": 1.7757,
"step": 4
},
{
"epoch": 0.16806722689075632,
"grad_norm": 0.12255859375,
"learning_rate": 0.0001,
"loss": 1.735,
"step": 5
},
{
"epoch": 0.20168067226890757,
"grad_norm": 0.10791015625,
"learning_rate": 0.00012,
"loss": 1.8269,
"step": 6
},
{
"epoch": 0.23529411764705882,
"grad_norm": 0.11669921875,
"learning_rate": 0.00014,
"loss": 1.8552,
"step": 7
},
{
"epoch": 0.2689075630252101,
"grad_norm": 0.119140625,
"learning_rate": 0.00016,
"loss": 1.8084,
"step": 8
},
{
"epoch": 0.2689075630252101,
"eval_loss": 1.8317129611968994,
"eval_runtime": 19.6984,
"eval_samples_per_second": 2.538,
"eval_steps_per_second": 0.355,
"step": 8
},
{
"epoch": 0.3025210084033613,
"grad_norm": 0.12255859375,
"learning_rate": 0.00018,
"loss": 1.7158,
"step": 9
},
{
"epoch": 0.33613445378151263,
"grad_norm": 0.134765625,
"learning_rate": 0.0002,
"loss": 1.8702,
"step": 10
},
{
"epoch": 0.3697478991596639,
"grad_norm": 0.12890625,
"learning_rate": 0.00019995608365087946,
"loss": 1.8307,
"step": 11
},
{
"epoch": 0.40336134453781514,
"grad_norm": 0.11474609375,
"learning_rate": 0.00019982437317643217,
"loss": 1.6583,
"step": 12
},
{
"epoch": 0.4369747899159664,
"grad_norm": 0.134765625,
"learning_rate": 0.0001996049842615217,
"loss": 1.6663,
"step": 13
},
{
"epoch": 0.47058823529411764,
"grad_norm": 0.12060546875,
"learning_rate": 0.00019929810960135172,
"loss": 1.7388,
"step": 14
},
{
"epoch": 0.5042016806722689,
"grad_norm": 0.126953125,
"learning_rate": 0.0001989040187322164,
"loss": 1.7485,
"step": 15
},
{
"epoch": 0.5378151260504201,
"grad_norm": 0.1259765625,
"learning_rate": 0.00019842305779475968,
"loss": 1.633,
"step": 16
},
{
"epoch": 0.5378151260504201,
"eval_loss": 1.7832777500152588,
"eval_runtime": 19.6833,
"eval_samples_per_second": 2.54,
"eval_steps_per_second": 0.356,
"step": 16
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.12109375,
"learning_rate": 0.0001978556492299504,
"loss": 1.8373,
"step": 17
},
{
"epoch": 0.6050420168067226,
"grad_norm": 0.1337890625,
"learning_rate": 0.0001972022914080411,
"loss": 1.6552,
"step": 18
},
{
"epoch": 0.6386554621848739,
"grad_norm": 0.126953125,
"learning_rate": 0.00019646355819083589,
"loss": 1.8113,
"step": 19
},
{
"epoch": 0.6722689075630253,
"grad_norm": 0.1220703125,
"learning_rate": 0.00019564009842765225,
"loss": 1.6544,
"step": 20
},
{
"epoch": 0.7058823529411765,
"grad_norm": 0.11669921875,
"learning_rate": 0.00019473263538541914,
"loss": 1.6649,
"step": 21
},
{
"epoch": 0.7394957983193278,
"grad_norm": 0.0986328125,
"learning_rate": 0.0001937419661134121,
"loss": 1.6868,
"step": 22
},
{
"epoch": 0.773109243697479,
"grad_norm": 0.126953125,
"learning_rate": 0.00019266896074318334,
"loss": 1.7762,
"step": 23
},
{
"epoch": 0.8067226890756303,
"grad_norm": 0.11279296875,
"learning_rate": 0.00019151456172430183,
"loss": 1.6737,
"step": 24
},
{
"epoch": 0.8067226890756303,
"eval_loss": 1.7643933296203613,
"eval_runtime": 19.6308,
"eval_samples_per_second": 2.547,
"eval_steps_per_second": 0.357,
"step": 24
},
{
"epoch": 0.8403361344537815,
"grad_norm": 0.1298828125,
"learning_rate": 0.00019027978299657436,
"loss": 1.6401,
"step": 25
},
{
"epoch": 0.8739495798319328,
"grad_norm": 0.099609375,
"learning_rate": 0.00018896570909947475,
"loss": 1.7068,
"step": 26
},
{
"epoch": 0.907563025210084,
"grad_norm": 0.12060546875,
"learning_rate": 0.0001875734942195637,
"loss": 1.8112,
"step": 27
},
{
"epoch": 0.9411764705882353,
"grad_norm": 0.1162109375,
"learning_rate": 0.00018610436117673555,
"loss": 1.6596,
"step": 28
},
{
"epoch": 0.9747899159663865,
"grad_norm": 0.12890625,
"learning_rate": 0.0001845596003501826,
"loss": 1.7936,
"step": 29
},
{
"epoch": 1.0084033613445378,
"grad_norm": 0.1240234375,
"learning_rate": 0.0001829405685450202,
"loss": 1.7947,
"step": 30
},
{
"epoch": 1.0420168067226891,
"grad_norm": 0.20703125,
"learning_rate": 0.00018124868780056814,
"loss": 1.6887,
"step": 31
},
{
"epoch": 1.0756302521008403,
"grad_norm": 0.1455078125,
"learning_rate": 0.00017948544414133534,
"loss": 1.6722,
"step": 32
},
{
"epoch": 1.0756302521008403,
"eval_loss": 1.7600828409194946,
"eval_runtime": 19.7105,
"eval_samples_per_second": 2.537,
"eval_steps_per_second": 0.355,
"step": 32
},
{
"epoch": 1.1092436974789917,
"grad_norm": 0.09814453125,
"learning_rate": 0.00017765238627180424,
"loss": 1.7145,
"step": 33
},
{
"epoch": 1.1428571428571428,
"grad_norm": 0.10693359375,
"learning_rate": 0.00017575112421616202,
"loss": 1.6609,
"step": 34
},
{
"epoch": 1.1764705882352942,
"grad_norm": 0.11572265625,
"learning_rate": 0.00017378332790417273,
"loss": 1.6681,
"step": 35
},
{
"epoch": 1.2100840336134453,
"grad_norm": 0.11767578125,
"learning_rate": 0.00017175072570443312,
"loss": 1.6641,
"step": 36
},
{
"epoch": 1.2436974789915967,
"grad_norm": 0.11376953125,
"learning_rate": 0.00016965510290629972,
"loss": 1.7011,
"step": 37
},
{
"epoch": 1.2773109243697478,
"grad_norm": 0.11572265625,
"learning_rate": 0.00016749830015182107,
"loss": 1.7171,
"step": 38
},
{
"epoch": 1.3109243697478992,
"grad_norm": 0.103515625,
"learning_rate": 0.00016528221181905217,
"loss": 1.6333,
"step": 39
},
{
"epoch": 1.3445378151260505,
"grad_norm": 0.111328125,
"learning_rate": 0.00016300878435817113,
"loss": 1.7162,
"step": 40
},
{
"epoch": 1.3445378151260505,
"eval_loss": 1.757140040397644,
"eval_runtime": 19.6485,
"eval_samples_per_second": 2.545,
"eval_steps_per_second": 0.356,
"step": 40
},
{
"epoch": 1.3781512605042017,
"grad_norm": 0.1484375,
"learning_rate": 0.00016068001458185936,
"loss": 1.6501,
"step": 41
},
{
"epoch": 1.4117647058823528,
"grad_norm": 0.1240234375,
"learning_rate": 0.0001582979479114472,
"loss": 1.6446,
"step": 42
},
{
"epoch": 1.4453781512605042,
"grad_norm": 0.119140625,
"learning_rate": 0.00015586467658036524,
"loss": 1.7104,
"step": 43
},
{
"epoch": 1.4789915966386555,
"grad_norm": 0.109375,
"learning_rate": 0.0001533823377964791,
"loss": 1.6146,
"step": 44
},
{
"epoch": 1.5126050420168067,
"grad_norm": 0.11572265625,
"learning_rate": 0.00015085311186492206,
"loss": 1.6448,
"step": 45
},
{
"epoch": 1.5462184873949578,
"grad_norm": 0.11572265625,
"learning_rate": 0.00014827922027307451,
"loss": 1.6735,
"step": 46
},
{
"epoch": 1.5798319327731094,
"grad_norm": 0.1181640625,
"learning_rate": 0.0001456629237393713,
"loss": 1.6604,
"step": 47
},
{
"epoch": 1.6134453781512605,
"grad_norm": 0.12060546875,
"learning_rate": 0.00014300652022765207,
"loss": 1.7046,
"step": 48
},
{
"epoch": 1.6134453781512605,
"eval_loss": 1.7558497190475464,
"eval_runtime": 19.7723,
"eval_samples_per_second": 2.529,
"eval_steps_per_second": 0.354,
"step": 48
},
{
"epoch": 1.6470588235294117,
"grad_norm": 0.1259765625,
"learning_rate": 0.00014031234292879725,
"loss": 1.694,
"step": 49
},
{
"epoch": 1.680672268907563,
"grad_norm": 0.1220703125,
"learning_rate": 0.00013758275821142382,
"loss": 1.625,
"step": 50
},
{
"epoch": 1.7142857142857144,
"grad_norm": 0.123046875,
"learning_rate": 0.0001348201635434399,
"loss": 1.6919,
"step": 51
},
{
"epoch": 1.7478991596638656,
"grad_norm": 0.1201171875,
"learning_rate": 0.00013202698538628376,
"loss": 1.6777,
"step": 52
},
{
"epoch": 1.7815126050420167,
"grad_norm": 0.12890625,
"learning_rate": 0.00012920567706369758,
"loss": 1.7762,
"step": 53
},
{
"epoch": 1.815126050420168,
"grad_norm": 0.126953125,
"learning_rate": 0.00012635871660690676,
"loss": 1.6668,
"step": 54
},
{
"epoch": 1.8487394957983194,
"grad_norm": 0.125,
"learning_rate": 0.00012348860457809838,
"loss": 1.7061,
"step": 55
},
{
"epoch": 1.8823529411764706,
"grad_norm": 0.1328125,
"learning_rate": 0.00012059786187410984,
"loss": 1.6714,
"step": 56
},
{
"epoch": 1.8823529411764706,
"eval_loss": 1.7563551664352417,
"eval_runtime": 19.6588,
"eval_samples_per_second": 2.543,
"eval_steps_per_second": 0.356,
"step": 56
},
{
"epoch": 1.9159663865546217,
"grad_norm": 0.1279296875,
"learning_rate": 0.0001176890275122573,
"loss": 1.6318,
"step": 57
},
{
"epoch": 1.949579831932773,
"grad_norm": 0.1318359375,
"learning_rate": 0.00011476465640024814,
"loss": 1.6693,
"step": 58
},
{
"epoch": 1.9831932773109244,
"grad_norm": 0.12353515625,
"learning_rate": 0.00011182731709213659,
"loss": 1.5927,
"step": 59
},
{
"epoch": 2.0168067226890756,
"grad_norm": 0.138671875,
"learning_rate": 0.00010887958953229349,
"loss": 1.6558,
"step": 60
},
{
"epoch": 2.0504201680672267,
"grad_norm": 0.134765625,
"learning_rate": 0.00010592406278937144,
"loss": 1.7352,
"step": 61
},
{
"epoch": 2.0840336134453783,
"grad_norm": 0.1328125,
"learning_rate": 0.00010296333278225599,
"loss": 1.6216,
"step": 62
},
{
"epoch": 2.1176470588235294,
"grad_norm": 0.1201171875,
"learning_rate": 0.0001,
"loss": 1.6365,
"step": 63
},
{
"epoch": 2.1512605042016806,
"grad_norm": 0.1298828125,
"learning_rate": 9.703666721774402e-05,
"loss": 1.6249,
"step": 64
},
{
"epoch": 2.1512605042016806,
"eval_loss": 1.756639838218689,
"eval_runtime": 19.5951,
"eval_samples_per_second": 2.552,
"eval_steps_per_second": 0.357,
"step": 64
},
{
"epoch": 2.184873949579832,
"grad_norm": 0.1357421875,
"learning_rate": 9.407593721062859e-05,
"loss": 1.653,
"step": 65
},
{
"epoch": 2.2184873949579833,
"grad_norm": 0.146484375,
"learning_rate": 9.112041046770653e-05,
"loss": 1.6545,
"step": 66
},
{
"epoch": 2.2521008403361344,
"grad_norm": 0.13671875,
"learning_rate": 8.817268290786343e-05,
"loss": 1.5787,
"step": 67
},
{
"epoch": 2.2857142857142856,
"grad_norm": 0.1328125,
"learning_rate": 8.523534359975189e-05,
"loss": 1.6532,
"step": 68
},
{
"epoch": 2.3193277310924367,
"grad_norm": 0.1396484375,
"learning_rate": 8.231097248774274e-05,
"loss": 1.6784,
"step": 69
},
{
"epoch": 2.3529411764705883,
"grad_norm": 0.1337890625,
"learning_rate": 7.940213812589018e-05,
"loss": 1.5721,
"step": 70
},
{
"epoch": 2.3865546218487395,
"grad_norm": 0.1416015625,
"learning_rate": 7.651139542190164e-05,
"loss": 1.5836,
"step": 71
},
{
"epoch": 2.4201680672268906,
"grad_norm": 0.146484375,
"learning_rate": 7.364128339309326e-05,
"loss": 1.5604,
"step": 72
},
{
"epoch": 2.4201680672268906,
"eval_loss": 1.7598735094070435,
"eval_runtime": 19.7508,
"eval_samples_per_second": 2.532,
"eval_steps_per_second": 0.354,
"step": 72
},
{
"epoch": 2.453781512605042,
"grad_norm": 0.1455078125,
"learning_rate": 7.079432293630244e-05,
"loss": 1.6259,
"step": 73
},
{
"epoch": 2.4873949579831933,
"grad_norm": 0.1484375,
"learning_rate": 6.797301461371625e-05,
"loss": 1.5811,
"step": 74
},
{
"epoch": 2.5210084033613445,
"grad_norm": 0.14453125,
"learning_rate": 6.517983645656014e-05,
"loss": 1.4929,
"step": 75
},
{
"epoch": 2.5546218487394956,
"grad_norm": 0.1572265625,
"learning_rate": 6.24172417885762e-05,
"loss": 1.7014,
"step": 76
},
{
"epoch": 2.588235294117647,
"grad_norm": 0.1484375,
"learning_rate": 5.96876570712028e-05,
"loss": 1.5623,
"step": 77
},
{
"epoch": 2.6218487394957983,
"grad_norm": 0.1474609375,
"learning_rate": 5.699347977234799e-05,
"loss": 1.6006,
"step": 78
},
{
"epoch": 2.6554621848739495,
"grad_norm": 0.150390625,
"learning_rate": 5.43370762606287e-05,
"loss": 1.6641,
"step": 79
},
{
"epoch": 2.689075630252101,
"grad_norm": 0.15234375,
"learning_rate": 5.172077972692553e-05,
"loss": 1.7003,
"step": 80
},
{
"epoch": 2.689075630252101,
"eval_loss": 1.761399269104004,
"eval_runtime": 19.6692,
"eval_samples_per_second": 2.542,
"eval_steps_per_second": 0.356,
"step": 80
},
{
"epoch": 2.722689075630252,
"grad_norm": 0.154296875,
"learning_rate": 4.914688813507797e-05,
"loss": 1.6923,
"step": 81
},
{
"epoch": 2.7563025210084033,
"grad_norm": 0.158203125,
"learning_rate": 4.661766220352097e-05,
"loss": 1.6819,
"step": 82
},
{
"epoch": 2.7899159663865545,
"grad_norm": 0.1513671875,
"learning_rate": 4.4135323419634766e-05,
"loss": 1.5649,
"step": 83
},
{
"epoch": 2.8235294117647056,
"grad_norm": 0.1494140625,
"learning_rate": 4.170205208855281e-05,
"loss": 1.608,
"step": 84
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.1611328125,
"learning_rate": 3.931998541814069e-05,
"loss": 1.5474,
"step": 85
},
{
"epoch": 2.8907563025210083,
"grad_norm": 0.1552734375,
"learning_rate": 3.69912156418289e-05,
"loss": 1.6484,
"step": 86
},
{
"epoch": 2.92436974789916,
"grad_norm": 0.1484375,
"learning_rate": 3.471778818094785e-05,
"loss": 1.6145,
"step": 87
},
{
"epoch": 2.957983193277311,
"grad_norm": 0.158203125,
"learning_rate": 3.250169984817897e-05,
"loss": 1.7115,
"step": 88
},
{
"epoch": 2.957983193277311,
"eval_loss": 1.7605273723602295,
"eval_runtime": 19.7632,
"eval_samples_per_second": 2.53,
"eval_steps_per_second": 0.354,
"step": 88
},
{
"epoch": 2.991596638655462,
"grad_norm": 0.1591796875,
"learning_rate": 3.034489709370033e-05,
"loss": 1.6485,
"step": 89
},
{
"epoch": 3.0252100840336134,
"grad_norm": 0.16015625,
"learning_rate": 2.8249274295566864e-05,
"loss": 1.5097,
"step": 90
},
{
"epoch": 3.0588235294117645,
"grad_norm": 0.1513671875,
"learning_rate": 2.6216672095827266e-05,
"loss": 1.5918,
"step": 91
},
{
"epoch": 3.092436974789916,
"grad_norm": 0.1484375,
"learning_rate": 2.4248875783837987e-05,
"loss": 1.6116,
"step": 92
},
{
"epoch": 3.1260504201680672,
"grad_norm": 0.1552734375,
"learning_rate": 2.234761372819577e-05,
"loss": 1.5989,
"step": 93
},
{
"epoch": 3.1596638655462184,
"grad_norm": 0.154296875,
"learning_rate": 2.0514555858664663e-05,
"loss": 1.6061,
"step": 94
},
{
"epoch": 3.19327731092437,
"grad_norm": 0.16015625,
"learning_rate": 1.875131219943187e-05,
"loss": 1.549,
"step": 95
},
{
"epoch": 3.226890756302521,
"grad_norm": 0.15625,
"learning_rate": 1.7059431454979824e-05,
"loss": 1.5937,
"step": 96
},
{
"epoch": 3.226890756302521,
"eval_loss": 1.7609126567840576,
"eval_runtime": 19.7273,
"eval_samples_per_second": 2.535,
"eval_steps_per_second": 0.355,
"step": 96
},
{
"epoch": 3.2605042016806722,
"grad_norm": 0.1611328125,
"learning_rate": 1.5440399649817385e-05,
"loss": 1.6636,
"step": 97
},
{
"epoch": 3.2941176470588234,
"grad_norm": 0.15625,
"learning_rate": 1.3895638823264446e-05,
"loss": 1.6452,
"step": 98
},
{
"epoch": 3.327731092436975,
"grad_norm": 0.146484375,
"learning_rate": 1.2426505780436326e-05,
"loss": 1.6122,
"step": 99
},
{
"epoch": 3.361344537815126,
"grad_norm": 0.154296875,
"learning_rate": 1.103429090052528e-05,
"loss": 1.7153,
"step": 100
},
{
"epoch": 3.3949579831932772,
"grad_norm": 0.154296875,
"learning_rate": 9.720217003425647e-06,
"loss": 1.5094,
"step": 101
},
{
"epoch": 3.4285714285714284,
"grad_norm": 0.146484375,
"learning_rate": 8.485438275698154e-06,
"loss": 1.5703,
"step": 102
},
{
"epoch": 3.46218487394958,
"grad_norm": 0.1572265625,
"learning_rate": 7.331039256816663e-06,
"loss": 1.5971,
"step": 103
},
{
"epoch": 3.495798319327731,
"grad_norm": 0.158203125,
"learning_rate": 6.258033886587911e-06,
"loss": 1.655,
"step": 104
},
{
"epoch": 3.495798319327731,
"eval_loss": 1.7612451314926147,
"eval_runtime": 19.7385,
"eval_samples_per_second": 2.533,
"eval_steps_per_second": 0.355,
"step": 104
},
{
"epoch": 3.5294117647058822,
"grad_norm": 0.158203125,
"learning_rate": 5.267364614580861e-06,
"loss": 1.6928,
"step": 105
},
{
"epoch": 3.5630252100840334,
"grad_norm": 0.1474609375,
"learning_rate": 4.359901572347758e-06,
"loss": 1.6083,
"step": 106
},
{
"epoch": 3.596638655462185,
"grad_norm": 0.1591796875,
"learning_rate": 3.5364418091641373e-06,
"loss": 1.6547,
"step": 107
},
{
"epoch": 3.630252100840336,
"grad_norm": 0.1513671875,
"learning_rate": 2.7977085919589254e-06,
"loss": 1.5425,
"step": 108
},
{
"epoch": 3.6638655462184873,
"grad_norm": 0.154296875,
"learning_rate": 2.144350770049597e-06,
"loss": 1.5763,
"step": 109
},
{
"epoch": 3.697478991596639,
"grad_norm": 0.1513671875,
"learning_rate": 1.576942205240317e-06,
"loss": 1.513,
"step": 110
},
{
"epoch": 3.73109243697479,
"grad_norm": 0.15234375,
"learning_rate": 1.0959812677835968e-06,
"loss": 1.5338,
"step": 111
},
{
"epoch": 3.764705882352941,
"grad_norm": 0.150390625,
"learning_rate": 7.018903986483083e-07,
"loss": 1.5829,
"step": 112
},
{
"epoch": 3.764705882352941,
"eval_loss": 1.7612619400024414,
"eval_runtime": 19.83,
"eval_samples_per_second": 2.521,
"eval_steps_per_second": 0.353,
"step": 112
},
{
"epoch": 3.7983193277310923,
"grad_norm": 0.1572265625,
"learning_rate": 3.950157384783104e-07,
"loss": 1.5591,
"step": 113
},
{
"epoch": 3.831932773109244,
"grad_norm": 0.1533203125,
"learning_rate": 1.7562682356786487e-07,
"loss": 1.6213,
"step": 114
},
{
"epoch": 3.865546218487395,
"grad_norm": 0.1572265625,
"learning_rate": 4.391634912056519e-08,
"loss": 1.6246,
"step": 115
},
{
"epoch": 3.899159663865546,
"grad_norm": 0.1591796875,
"learning_rate": 0.0,
"loss": 1.5533,
"step": 116
}
],
"logging_steps": 1,
"max_steps": 116,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 29,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.3243100413820928e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}