{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.10552975939214859,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005276487969607429,
"grad_norm": 0.6827425360679626,
"learning_rate": 2.9947235120303926e-05,
"loss": 2.207,
"step": 50
},
{
"epoch": 0.010552975939214858,
"grad_norm": 0.5957860350608826,
"learning_rate": 2.989447024060785e-05,
"loss": 0.2398,
"step": 100
},
{
"epoch": 0.015829463908822287,
"grad_norm": 0.46854081749916077,
"learning_rate": 2.9841705360911777e-05,
"loss": 0.0525,
"step": 150
},
{
"epoch": 0.021105951878429716,
"grad_norm": 0.10302311927080154,
"learning_rate": 2.9788940481215702e-05,
"loss": 0.0037,
"step": 200
},
{
"epoch": 0.026382439848037148,
"grad_norm": 0.01405419036746025,
"learning_rate": 2.9736175601519628e-05,
"loss": 0.001,
"step": 250
},
{
"epoch": 0.031658927817644573,
"grad_norm": 0.12790632247924805,
"learning_rate": 2.9683410721823553e-05,
"loss": 0.0007,
"step": 300
},
{
"epoch": 0.036935415787252006,
"grad_norm": 0.0026043676771223545,
"learning_rate": 2.9630645842127478e-05,
"loss": 0.0004,
"step": 350
},
{
"epoch": 0.04221190375685943,
"grad_norm": 0.030596332624554634,
"learning_rate": 2.9577880962431404e-05,
"loss": 0.0016,
"step": 400
},
{
"epoch": 0.047488391726466864,
"grad_norm": 0.04252925142645836,
"learning_rate": 2.952511608273533e-05,
"loss": 0.0008,
"step": 450
},
{
"epoch": 0.052764879696074296,
"grad_norm": 0.03828505799174309,
"learning_rate": 2.947235120303926e-05,
"loss": 0.0003,
"step": 500
},
{
"epoch": 0.05804136766568172,
"grad_norm": 0.006123501807451248,
"learning_rate": 2.9419586323343186e-05,
"loss": 0.0004,
"step": 550
},
{
"epoch": 0.06331785563528915,
"grad_norm": 0.0026047523133456707,
"learning_rate": 2.9366821443647112e-05,
"loss": 0.0002,
"step": 600
},
{
"epoch": 0.06859434360489658,
"grad_norm": 0.0018419412663206458,
"learning_rate": 2.9314056563951037e-05,
"loss": 0.0002,
"step": 650
},
{
"epoch": 0.07387083157450401,
"grad_norm": 0.03250932693481445,
"learning_rate": 2.9261291684254962e-05,
"loss": 0.0009,
"step": 700
},
{
"epoch": 0.07914731954411144,
"grad_norm": 0.001018165610730648,
"learning_rate": 2.9208526804558888e-05,
"loss": 0.0003,
"step": 750
},
{
"epoch": 0.08442380751371886,
"grad_norm": 0.0010480296332389116,
"learning_rate": 2.9155761924862813e-05,
"loss": 0.0002,
"step": 800
},
{
"epoch": 0.0897002954833263,
"grad_norm": 0.000887015659827739,
"learning_rate": 2.910299704516674e-05,
"loss": 0.0001,
"step": 850
},
{
"epoch": 0.09497678345293373,
"grad_norm": 0.02170419879257679,
"learning_rate": 2.9050232165470664e-05,
"loss": 0.0001,
"step": 900
},
{
"epoch": 0.10025327142254116,
"grad_norm": 0.02256314642727375,
"learning_rate": 2.899746728577459e-05,
"loss": 0.0002,
"step": 950
},
{
"epoch": 0.10552975939214859,
"grad_norm": 0.007039123214781284,
"learning_rate": 2.8944702406078515e-05,
"loss": 0.0001,
"step": 1000
}
],
"logging_steps": 50,
"max_steps": 28428,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 128,
"trial_name": null,
"trial_params": null
}