{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 99,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.010101010101010102,
"grad_norm": 1.4316415786743164,
"learning_rate": 2.96969696969697e-05,
"loss": 10.0726,
"step": 1
},
{
"epoch": 0.020202020202020204,
"grad_norm": 1.560971736907959,
"learning_rate": 2.9393939393939394e-05,
"loss": 10.129,
"step": 2
},
{
"epoch": 0.030303030303030304,
"grad_norm": 1.4093116521835327,
"learning_rate": 2.9090909090909093e-05,
"loss": 10.0404,
"step": 3
},
{
"epoch": 0.04040404040404041,
"grad_norm": 1.5073634386062622,
"learning_rate": 2.8787878787878788e-05,
"loss": 10.0214,
"step": 4
},
{
"epoch": 0.050505050505050504,
"grad_norm": 1.6120322942733765,
"learning_rate": 2.8484848484848486e-05,
"loss": 10.0287,
"step": 5
},
{
"epoch": 0.06060606060606061,
"grad_norm": 1.8709332942962646,
"learning_rate": 2.8181818181818185e-05,
"loss": 10.0744,
"step": 6
},
{
"epoch": 0.0707070707070707,
"grad_norm": 1.8668410778045654,
"learning_rate": 2.787878787878788e-05,
"loss": 9.9539,
"step": 7
},
{
"epoch": 0.08080808080808081,
"grad_norm": 1.9248428344726562,
"learning_rate": 2.7575757575757578e-05,
"loss": 9.8166,
"step": 8
},
{
"epoch": 0.09090909090909091,
"grad_norm": 1.9779285192489624,
"learning_rate": 2.7272727272727273e-05,
"loss": 9.7811,
"step": 9
},
{
"epoch": 0.10101010101010101,
"grad_norm": 2.0661659240722656,
"learning_rate": 2.696969696969697e-05,
"loss": 9.743,
"step": 10
},
{
"epoch": 0.1111111111111111,
"grad_norm": 2.304006338119507,
"learning_rate": 2.6666666666666667e-05,
"loss": 9.7368,
"step": 11
},
{
"epoch": 0.12121212121212122,
"grad_norm": 2.2433547973632812,
"learning_rate": 2.6363636363636365e-05,
"loss": 9.6052,
"step": 12
},
{
"epoch": 0.13131313131313133,
"grad_norm": 2.3963887691497803,
"learning_rate": 2.6060606060606063e-05,
"loss": 9.5241,
"step": 13
},
{
"epoch": 0.1414141414141414,
"grad_norm": 2.690804958343506,
"learning_rate": 2.575757575757576e-05,
"loss": 9.4771,
"step": 14
},
{
"epoch": 0.15151515151515152,
"grad_norm": 2.54472017288208,
"learning_rate": 2.5454545454545457e-05,
"loss": 9.3078,
"step": 15
},
{
"epoch": 0.16161616161616163,
"grad_norm": 2.672468662261963,
"learning_rate": 2.5151515151515152e-05,
"loss": 9.295,
"step": 16
},
{
"epoch": 0.1717171717171717,
"grad_norm": 2.9068398475646973,
"learning_rate": 2.484848484848485e-05,
"loss": 9.233,
"step": 17
},
{
"epoch": 0.18181818181818182,
"grad_norm": 2.6378564834594727,
"learning_rate": 2.454545454545455e-05,
"loss": 9.0833,
"step": 18
},
{
"epoch": 0.1919191919191919,
"grad_norm": 2.9194931983947754,
"learning_rate": 2.4242424242424244e-05,
"loss": 9.0367,
"step": 19
},
{
"epoch": 0.20202020202020202,
"grad_norm": 2.9216768741607666,
"learning_rate": 2.3939393939393942e-05,
"loss": 8.9809,
"step": 20
},
{
"epoch": 0.21212121212121213,
"grad_norm": 2.6998298168182373,
"learning_rate": 2.3636363636363637e-05,
"loss": 8.8696,
"step": 21
},
{
"epoch": 0.2222222222222222,
"grad_norm": 2.8181002140045166,
"learning_rate": 2.3333333333333336e-05,
"loss": 8.8195,
"step": 22
},
{
"epoch": 0.23232323232323232,
"grad_norm": 2.82079815864563,
"learning_rate": 2.303030303030303e-05,
"loss": 8.7101,
"step": 23
},
{
"epoch": 0.24242424242424243,
"grad_norm": 2.900256633758545,
"learning_rate": 2.272727272727273e-05,
"loss": 8.6645,
"step": 24
},
{
"epoch": 0.25252525252525254,
"grad_norm": 2.9626667499542236,
"learning_rate": 2.2424242424242424e-05,
"loss": 8.4995,
"step": 25
},
{
"epoch": 0.26262626262626265,
"grad_norm": 2.78900146484375,
"learning_rate": 2.212121212121212e-05,
"loss": 8.5197,
"step": 26
},
{
"epoch": 0.2727272727272727,
"grad_norm": 2.8724753856658936,
"learning_rate": 2.1818181818181818e-05,
"loss": 8.3911,
"step": 27
},
{
"epoch": 0.2828282828282828,
"grad_norm": 3.0413928031921387,
"learning_rate": 2.1515151515151513e-05,
"loss": 8.3371,
"step": 28
},
{
"epoch": 0.29292929292929293,
"grad_norm": 2.8662936687469482,
"learning_rate": 2.121212121212121e-05,
"loss": 8.274,
"step": 29
},
{
"epoch": 0.30303030303030304,
"grad_norm": 2.660316228866577,
"learning_rate": 2.090909090909091e-05,
"loss": 8.2402,
"step": 30
},
{
"epoch": 0.31313131313131315,
"grad_norm": 2.8771886825561523,
"learning_rate": 2.0606060606060605e-05,
"loss": 8.164,
"step": 31
},
{
"epoch": 0.32323232323232326,
"grad_norm": 2.6009693145751953,
"learning_rate": 2.0303030303030303e-05,
"loss": 8.1005,
"step": 32
},
{
"epoch": 0.3333333333333333,
"grad_norm": 2.431004285812378,
"learning_rate": 1.9999999999999998e-05,
"loss": 8.0474,
"step": 33
},
{
"epoch": 0.3434343434343434,
"grad_norm": 2.523425817489624,
"learning_rate": 1.9696969696969697e-05,
"loss": 7.9858,
"step": 34
},
{
"epoch": 0.35353535353535354,
"grad_norm": 2.4093704223632812,
"learning_rate": 1.9393939393939395e-05,
"loss": 7.9892,
"step": 35
},
{
"epoch": 0.36363636363636365,
"grad_norm": 2.5664756298065186,
"learning_rate": 1.909090909090909e-05,
"loss": 7.8404,
"step": 36
},
{
"epoch": 0.37373737373737376,
"grad_norm": 1.9647918939590454,
"learning_rate": 1.878787878787879e-05,
"loss": 7.8655,
"step": 37
},
{
"epoch": 0.3838383838383838,
"grad_norm": 2.1500051021575928,
"learning_rate": 1.8484848484848484e-05,
"loss": 7.8058,
"step": 38
},
{
"epoch": 0.3939393939393939,
"grad_norm": 1.9459105730056763,
"learning_rate": 1.8181818181818182e-05,
"loss": 7.821,
"step": 39
},
{
"epoch": 0.40404040404040403,
"grad_norm": 1.8056164979934692,
"learning_rate": 1.7878787878787877e-05,
"loss": 7.814,
"step": 40
},
{
"epoch": 0.41414141414141414,
"grad_norm": 2.448673725128174,
"learning_rate": 1.7575757575757576e-05,
"loss": 7.5915,
"step": 41
},
{
"epoch": 0.42424242424242425,
"grad_norm": 1.6147074699401855,
"learning_rate": 1.7272727272727274e-05,
"loss": 7.7663,
"step": 42
},
{
"epoch": 0.43434343434343436,
"grad_norm": 1.7669254541397095,
"learning_rate": 1.696969696969697e-05,
"loss": 7.6066,
"step": 43
},
{
"epoch": 0.4444444444444444,
"grad_norm": 1.938239574432373,
"learning_rate": 1.6666666666666667e-05,
"loss": 7.6132,
"step": 44
},
{
"epoch": 0.45454545454545453,
"grad_norm": 2.3109822273254395,
"learning_rate": 1.6363636363636363e-05,
"loss": 7.4384,
"step": 45
},
{
"epoch": 0.46464646464646464,
"grad_norm": 1.5837619304656982,
"learning_rate": 1.606060606060606e-05,
"loss": 7.6066,
"step": 46
},
{
"epoch": 0.47474747474747475,
"grad_norm": 1.6850427389144897,
"learning_rate": 1.575757575757576e-05,
"loss": 7.474,
"step": 47
},
{
"epoch": 0.48484848484848486,
"grad_norm": 1.988224983215332,
"learning_rate": 1.5454545454545454e-05,
"loss": 7.3918,
"step": 48
},
{
"epoch": 0.494949494949495,
"grad_norm": 1.754016637802124,
"learning_rate": 1.5151515151515153e-05,
"loss": 7.3915,
"step": 49
},
{
"epoch": 0.5050505050505051,
"grad_norm": 2.1182453632354736,
"learning_rate": 1.484848484848485e-05,
"loss": 7.4276,
"step": 50
},
{
"epoch": 0.5151515151515151,
"grad_norm": 1.7513837814331055,
"learning_rate": 1.4545454545454546e-05,
"loss": 7.3634,
"step": 51
},
{
"epoch": 0.5252525252525253,
"grad_norm": 1.5384173393249512,
"learning_rate": 1.4242424242424243e-05,
"loss": 7.3046,
"step": 52
},
{
"epoch": 0.5353535353535354,
"grad_norm": 1.2067221403121948,
"learning_rate": 1.393939393939394e-05,
"loss": 7.5319,
"step": 53
},
{
"epoch": 0.5454545454545454,
"grad_norm": 1.1458990573883057,
"learning_rate": 1.3636363636363637e-05,
"loss": 7.354,
"step": 54
},
{
"epoch": 0.5555555555555556,
"grad_norm": 1.2268686294555664,
"learning_rate": 1.3333333333333333e-05,
"loss": 7.3975,
"step": 55
},
{
"epoch": 0.5656565656565656,
"grad_norm": 1.130756139755249,
"learning_rate": 1.3030303030303032e-05,
"loss": 7.4055,
"step": 56
},
{
"epoch": 0.5757575757575758,
"grad_norm": 1.1011919975280762,
"learning_rate": 1.2727272727272728e-05,
"loss": 7.4282,
"step": 57
},
{
"epoch": 0.5858585858585859,
"grad_norm": 1.1305310726165771,
"learning_rate": 1.2424242424242425e-05,
"loss": 7.2983,
"step": 58
},
{
"epoch": 0.5959595959595959,
"grad_norm": 1.2476590871810913,
"learning_rate": 1.2121212121212122e-05,
"loss": 7.4395,
"step": 59
},
{
"epoch": 0.6060606060606061,
"grad_norm": 1.0120925903320312,
"learning_rate": 1.1818181818181819e-05,
"loss": 7.3328,
"step": 60
},
{
"epoch": 0.6161616161616161,
"grad_norm": 1.1586886644363403,
"learning_rate": 1.1515151515151515e-05,
"loss": 7.3609,
"step": 61
},
{
"epoch": 0.6262626262626263,
"grad_norm": 0.9742720127105713,
"learning_rate": 1.1212121212121212e-05,
"loss": 7.3204,
"step": 62
},
{
"epoch": 0.6363636363636364,
"grad_norm": 0.9039322733879089,
"learning_rate": 1.0909090909090909e-05,
"loss": 7.3262,
"step": 63
},
{
"epoch": 0.6464646464646465,
"grad_norm": 1.1389368772506714,
"learning_rate": 1.0606060606060606e-05,
"loss": 7.3952,
"step": 64
},
{
"epoch": 0.6565656565656566,
"grad_norm": 0.9162051677703857,
"learning_rate": 1.0303030303030302e-05,
"loss": 7.2239,
"step": 65
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.9564353823661804,
"learning_rate": 9.999999999999999e-06,
"loss": 7.2159,
"step": 66
},
{
"epoch": 0.6767676767676768,
"grad_norm": 1.0328965187072754,
"learning_rate": 9.696969696969698e-06,
"loss": 7.2235,
"step": 67
},
{
"epoch": 0.6868686868686869,
"grad_norm": 1.2637859582901,
"learning_rate": 9.393939393939394e-06,
"loss": 7.403,
"step": 68
},
{
"epoch": 0.696969696969697,
"grad_norm": 0.8482282757759094,
"learning_rate": 9.090909090909091e-06,
"loss": 7.2136,
"step": 69
},
{
"epoch": 0.7070707070707071,
"grad_norm": 0.748203694820404,
"learning_rate": 8.787878787878788e-06,
"loss": 7.2005,
"step": 70
},
{
"epoch": 0.7171717171717171,
"grad_norm": 0.8983027338981628,
"learning_rate": 8.484848484848485e-06,
"loss": 7.2121,
"step": 71
},
{
"epoch": 0.7272727272727273,
"grad_norm": 1.0953037738800049,
"learning_rate": 8.181818181818181e-06,
"loss": 7.2214,
"step": 72
},
{
"epoch": 0.7373737373737373,
"grad_norm": null,
"learning_rate": 8.181818181818181e-06,
"loss": 0.0,
"step": 73
},
{
"epoch": 0.7474747474747475,
"grad_norm": 0.7874919772148132,
"learning_rate": 7.87878787878788e-06,
"loss": 7.2169,
"step": 74
},
{
"epoch": 0.7575757575757576,
"grad_norm": 1.9311456680297852,
"learning_rate": 7.5757575757575764e-06,
"loss": 7.1586,
"step": 75
},
{
"epoch": 0.7676767676767676,
"grad_norm": 0.9535604119300842,
"learning_rate": 7.272727272727273e-06,
"loss": 7.3104,
"step": 76
},
{
"epoch": 0.7777777777777778,
"grad_norm": 0.9301349520683289,
"learning_rate": 6.96969696969697e-06,
"loss": 7.2056,
"step": 77
},
{
"epoch": 0.7878787878787878,
"grad_norm": 1.5866955518722534,
"learning_rate": 6.666666666666667e-06,
"loss": 7.1204,
"step": 78
},
{
"epoch": 0.797979797979798,
"grad_norm": 1.0217430591583252,
"learning_rate": 6.363636363636364e-06,
"loss": 7.233,
"step": 79
},
{
"epoch": 0.8080808080808081,
"grad_norm": 0.6997321844100952,
"learning_rate": 6.060606060606061e-06,
"loss": 7.2076,
"step": 80
},
{
"epoch": 0.8181818181818182,
"grad_norm": 0.8969782590866089,
"learning_rate": 5.757575757575758e-06,
"loss": 7.1913,
"step": 81
},
{
"epoch": 0.8282828282828283,
"grad_norm": 1.0181961059570312,
"learning_rate": 5.4545454545454545e-06,
"loss": 7.3311,
"step": 82
},
{
"epoch": 0.8383838383838383,
"grad_norm": 1.2779223918914795,
"learning_rate": 5.151515151515151e-06,
"loss": 7.0971,
"step": 83
},
{
"epoch": 0.8484848484848485,
"grad_norm": 0.9610503315925598,
"learning_rate": 4.848484848484849e-06,
"loss": 7.232,
"step": 84
},
{
"epoch": 0.8585858585858586,
"grad_norm": 0.8355559706687927,
"learning_rate": 4.5454545454545455e-06,
"loss": 7.2681,
"step": 85
},
{
"epoch": 0.8686868686868687,
"grad_norm": 1.0366374254226685,
"learning_rate": 4.242424242424242e-06,
"loss": 7.2509,
"step": 86
},
{
"epoch": 0.8787878787878788,
"grad_norm": 0.8738848567008972,
"learning_rate": 3.93939393939394e-06,
"loss": 7.1105,
"step": 87
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.8608594536781311,
"learning_rate": 3.6363636363636366e-06,
"loss": 7.1243,
"step": 88
},
{
"epoch": 0.898989898989899,
"grad_norm": 1.1317100524902344,
"learning_rate": 3.3333333333333333e-06,
"loss": 7.0372,
"step": 89
},
{
"epoch": 0.9090909090909091,
"grad_norm": 0.802678108215332,
"learning_rate": 3.0303030303030305e-06,
"loss": 7.1456,
"step": 90
},
{
"epoch": 0.9191919191919192,
"grad_norm": 1.3433420658111572,
"learning_rate": 2.7272727272727272e-06,
"loss": 7.3152,
"step": 91
},
{
"epoch": 0.9292929292929293,
"grad_norm": 1.0658912658691406,
"learning_rate": 2.4242424242424244e-06,
"loss": 7.3189,
"step": 92
},
{
"epoch": 0.9393939393939394,
"grad_norm": 0.9861695170402527,
"learning_rate": 2.121212121212121e-06,
"loss": 7.2579,
"step": 93
},
{
"epoch": 0.9494949494949495,
"grad_norm": 0.9982382655143738,
"learning_rate": 1.8181818181818183e-06,
"loss": 7.2653,
"step": 94
},
{
"epoch": 0.9595959595959596,
"grad_norm": 0.8333445191383362,
"learning_rate": 1.5151515151515152e-06,
"loss": 7.159,
"step": 95
},
{
"epoch": 0.9696969696969697,
"grad_norm": 0.7748318910598755,
"learning_rate": 1.2121212121212122e-06,
"loss": 7.1589,
"step": 96
},
{
"epoch": 0.9797979797979798,
"grad_norm": 0.8152617812156677,
"learning_rate": 9.090909090909091e-07,
"loss": 7.2117,
"step": 97
},
{
"epoch": 0.98989898989899,
"grad_norm": 1.3867417573928833,
"learning_rate": 6.060606060606061e-07,
"loss": 6.9967,
"step": 98
},
{
"epoch": 1.0,
"grad_norm": 1.3770259618759155,
"learning_rate": 3.0303030303030305e-07,
"loss": 7.3112,
"step": 99
},
{
"epoch": 1.0,
"eval_audio_cosine_sim": 0.4540814161300659,
"eval_loss": 3.913311243057251,
"eval_runtime": 301.4584,
"eval_samples_per_second": 0.03,
"eval_steps_per_second": 0.01,
"eval_text_cosine_sim": 0.24709360301494598,
"step": 99
},
{
"epoch": 1.0,
"step": 99,
"total_flos": 2077734266890272.0,
"train_loss": 7.893082406785753,
"train_runtime": 1993.2394,
"train_samples_per_second": 0.793,
"train_steps_per_second": 0.05
}
],
"logging_steps": 1.0,
"max_steps": 99,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2077734266890272.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}