{
"best_metric": 1.0019118785858154,
"best_model_checkpoint": "fairface_age_image_detection/checkpoint-9376",
"epoch": 4.0,
"eval_steps": 500,
"global_step": 9376,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.21331058020477817,
"grad_norm": 2.756800413131714,
"learning_rate": 5.710486811065838e-06,
"loss": 1.7206,
"step": 500
},
{
"epoch": 0.42662116040955633,
"grad_norm": 3.085395097732544,
"learning_rate": 5.3888054900278794e-06,
"loss": 1.3151,
"step": 1000
},
{
"epoch": 0.6399317406143344,
"grad_norm": 3.533961057662964,
"learning_rate": 5.067124168989921e-06,
"loss": 1.212,
"step": 1500
},
{
"epoch": 0.8532423208191127,
"grad_norm": 5.493643283843994,
"learning_rate": 4.745442847951962e-06,
"loss": 1.145,
"step": 2000
},
{
"epoch": 1.0,
"eval_accuracy": 0.5652,
"eval_loss": 1.115075707435608,
"eval_model_preparation_time": 0.0037,
"eval_runtime": 903.327,
"eval_samples_per_second": 11.07,
"eval_steps_per_second": 1.384,
"step": 2344
},
{
"epoch": 1.0665529010238908,
"grad_norm": 5.28899621963501,
"learning_rate": 4.423761526914004e-06,
"loss": 1.1006,
"step": 2500
},
{
"epoch": 1.2798634812286689,
"grad_norm": 6.52961540222168,
"learning_rate": 4.102080205876046e-06,
"loss": 1.0519,
"step": 3000
},
{
"epoch": 1.493174061433447,
"grad_norm": 10.652497291564941,
"learning_rate": 3.7803988848380875e-06,
"loss": 1.0292,
"step": 3500
},
{
"epoch": 1.7064846416382253,
"grad_norm": 5.245483875274658,
"learning_rate": 3.4587175638001284e-06,
"loss": 1.016,
"step": 4000
},
{
"epoch": 1.9197952218430034,
"grad_norm": 4.070522308349609,
"learning_rate": 3.1370362427621706e-06,
"loss": 1.0056,
"step": 4500
},
{
"epoch": 2.0,
"eval_accuracy": 0.5831,
"eval_loss": 1.030907392501831,
"eval_model_preparation_time": 0.0037,
"eval_runtime": 917.4025,
"eval_samples_per_second": 10.9,
"eval_steps_per_second": 1.363,
"step": 4688
},
{
"epoch": 2.1331058020477816,
"grad_norm": 6.568565845489502,
"learning_rate": 2.815354921724212e-06,
"loss": 0.9796,
"step": 5000
},
{
"epoch": 2.34641638225256,
"grad_norm": 10.742039680480957,
"learning_rate": 2.4936736006862537e-06,
"loss": 0.9587,
"step": 5500
},
{
"epoch": 2.5597269624573378,
"grad_norm": 4.3592424392700195,
"learning_rate": 2.171992279648295e-06,
"loss": 0.9455,
"step": 6000
},
{
"epoch": 2.773037542662116,
"grad_norm": 5.696556091308594,
"learning_rate": 1.8503109586103366e-06,
"loss": 0.9363,
"step": 6500
},
{
"epoch": 2.986348122866894,
"grad_norm": 5.746116638183594,
"learning_rate": 1.5286296375723786e-06,
"loss": 0.9347,
"step": 7000
},
{
"epoch": 3.0,
"eval_accuracy": 0.5883,
"eval_loss": 1.0057746171951294,
"eval_model_preparation_time": 0.0037,
"eval_runtime": 926.1536,
"eval_samples_per_second": 10.797,
"eval_steps_per_second": 1.35,
"step": 7032
},
{
"epoch": 3.1996587030716723,
"grad_norm": 4.218583106994629,
"learning_rate": 1.20694831653442e-06,
"loss": 0.9035,
"step": 7500
},
{
"epoch": 3.4129692832764507,
"grad_norm": 6.724698543548584,
"learning_rate": 8.852669954964614e-07,
"loss": 0.9081,
"step": 8000
},
{
"epoch": 3.6262798634812285,
"grad_norm": 6.658480167388916,
"learning_rate": 5.635856744585031e-07,
"loss": 0.905,
"step": 8500
},
{
"epoch": 3.839590443686007,
"grad_norm": 4.382744789123535,
"learning_rate": 2.4190435342054474e-07,
"loss": 0.8969,
"step": 9000
},
{
"epoch": 4.0,
"eval_accuracy": 0.5892,
"eval_loss": 1.0019118785858154,
"eval_model_preparation_time": 0.0037,
"eval_runtime": 929.5968,
"eval_samples_per_second": 10.757,
"eval_steps_per_second": 1.345,
"step": 9376
}
],
"logging_steps": 500,
"max_steps": 9376,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.32490554103808e+19,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}