chinese_danmaku_roberta / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "global_step": 18927,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08,
      "learning_rate": 4.867913562635389e-05,
      "loss": 1.2978,
      "step": 500
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.735827125270778e-05,
      "loss": 1.2005,
      "step": 1000
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.603740687906166e-05,
      "loss": 1.1692,
      "step": 1500
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.471654250541555e-05,
      "loss": 1.1418,
      "step": 2000
    },
    {
      "epoch": 0.4,
      "learning_rate": 4.339567813176943e-05,
      "loss": 1.1284,
      "step": 2500
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.2074813758123315e-05,
      "loss": 1.105,
      "step": 3000
    },
    {
      "epoch": 0.55,
      "learning_rate": 4.07539493844772e-05,
      "loss": 1.1058,
      "step": 3500
    },
    {
      "epoch": 0.63,
      "learning_rate": 3.943308501083109e-05,
      "loss": 1.0819,
      "step": 4000
    },
    {
      "epoch": 0.71,
      "learning_rate": 3.811222063718498e-05,
      "loss": 1.0774,
      "step": 4500
    },
    {
      "epoch": 0.79,
      "learning_rate": 3.6793997992286154e-05,
      "loss": 1.0645,
      "step": 5000
    },
    {
      "epoch": 0.87,
      "learning_rate": 3.547313361864004e-05,
      "loss": 1.059,
      "step": 5500
    },
    {
      "epoch": 0.95,
      "learning_rate": 3.415226924499392e-05,
      "loss": 1.0522,
      "step": 6000
    },
    {
      "epoch": 1.03,
      "learning_rate": 3.283140487134781e-05,
      "loss": 1.0476,
      "step": 6500
    },
    {
      "epoch": 1.11,
      "learning_rate": 3.15105404977017e-05,
      "loss": 1.0402,
      "step": 7000
    },
    {
      "epoch": 1.19,
      "learning_rate": 3.0189676124055582e-05,
      "loss": 1.0404,
      "step": 7500
    },
    {
      "epoch": 1.27,
      "learning_rate": 2.886881175040947e-05,
      "loss": 1.0339,
      "step": 8000
    },
    {
      "epoch": 1.35,
      "learning_rate": 2.7547947376763355e-05,
      "loss": 1.0283,
      "step": 8500
    },
    {
      "epoch": 1.43,
      "learning_rate": 2.6229724731864537e-05,
      "loss": 1.0299,
      "step": 9000
    },
    {
      "epoch": 1.51,
      "learning_rate": 2.4908860358218418e-05,
      "loss": 1.019,
      "step": 9500
    },
    {
      "epoch": 1.59,
      "learning_rate": 2.3587995984572305e-05,
      "loss": 1.0144,
      "step": 10000
    },
    {
      "epoch": 1.66,
      "learning_rate": 2.2269773339673484e-05,
      "loss": 1.0099,
      "step": 10500
    },
    {
      "epoch": 1.74,
      "learning_rate": 2.094890896602737e-05,
      "loss": 1.0064,
      "step": 11000
    },
    {
      "epoch": 1.82,
      "learning_rate": 1.9628044592381256e-05,
      "loss": 0.9982,
      "step": 11500
    },
    {
      "epoch": 1.9,
      "learning_rate": 1.830718021873514e-05,
      "loss": 0.9962,
      "step": 12000
    },
    {
      "epoch": 1.98,
      "learning_rate": 1.6986315845089025e-05,
      "loss": 1.0024,
      "step": 12500
    },
    {
      "epoch": 2.06,
      "learning_rate": 1.5665451471442913e-05,
      "loss": 1.0024,
      "step": 13000
    },
    {
      "epoch": 2.14,
      "learning_rate": 1.4344587097796799e-05,
      "loss": 0.9936,
      "step": 13500
    },
    {
      "epoch": 2.22,
      "learning_rate": 1.3026364452897977e-05,
      "loss": 0.9988,
      "step": 14000
    },
    {
      "epoch": 2.3,
      "learning_rate": 1.1705500079251864e-05,
      "loss": 0.9902,
      "step": 14500
    },
    {
      "epoch": 2.38,
      "learning_rate": 1.0384635705605748e-05,
      "loss": 0.9885,
      "step": 15000
    },
    {
      "epoch": 2.46,
      "learning_rate": 9.066413060706927e-06,
      "loss": 0.9836,
      "step": 15500
    },
    {
      "epoch": 2.54,
      "learning_rate": 7.745548687060814e-06,
      "loss": 0.9866,
      "step": 16000
    },
    {
      "epoch": 2.62,
      "learning_rate": 6.424684313414699e-06,
      "loss": 0.9871,
      "step": 16500
    },
    {
      "epoch": 2.69,
      "learning_rate": 5.103819939768585e-06,
      "loss": 0.992,
      "step": 17000
    },
    {
      "epoch": 2.77,
      "learning_rate": 3.782955566122471e-06,
      "loss": 0.9804,
      "step": 17500
    },
    {
      "epoch": 2.85,
      "learning_rate": 2.462091192476357e-06,
      "loss": 0.9887,
      "step": 18000
    },
    {
      "epoch": 2.93,
      "learning_rate": 1.1412268188302427e-06,
      "loss": 0.9785,
      "step": 18500
    },
    {
      "epoch": 3.0,
      "step": 18927,
      "total_flos": 9.490460801340211e+16,
      "train_loss": 1.0424538015431204,
      "train_runtime": 40144.3005,
      "train_samples_per_second": 30.173,
      "train_steps_per_second": 0.471
    }
  ],
  "max_steps": 18927,
  "num_train_epochs": 3,
  "total_flos": 9.490460801340211e+16,
  "trial_name": null,
  "trial_params": null
}
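
This is the standard trainer_state.json written by the Hugging Face Trainer at the end of a run: log_history holds one entry per logging interval (every 500 steps here), and a final summary entry carries train_loss/train_runtime instead of loss/learning_rate. The logged learning rates appear consistent with a linear decay from an initial 5e-5 with no warmup over the 18,927 steps. Below is a minimal sketch of how one might load this file and plot the loss curve; the local path "trainer_state.json" and the matplotlib dependency are assumptions for illustration, not part of this repo.

# Minimal sketch: parse trainer_state.json and plot the training loss.
# Assumes the file has been downloaded locally as "trainer_state.json"
# and that matplotlib is installed.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

# Regular logging entries carry "loss"; the final summary entry carries
# "train_loss"/"train_runtime" instead, so filtering on "loss" skips it.
entries = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in entries]
losses = [e["loss"] for e in entries]

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("chinese_danmaku_roberta: loss over 3 epochs")
plt.show()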