{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.9391304347826086,
  "eval_steps": 4,
  "global_step": 28,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06956521739130435,
      "grad_norm": 0.24870926141738892,
      "learning_rate": 2e-05,
      "loss": 0.9615,
      "step": 1
    },
    {
      "epoch": 0.06956521739130435,
      "eval_loss": 0.9210608005523682,
      "eval_runtime": 74.7303,
      "eval_samples_per_second": 2.088,
      "eval_steps_per_second": 1.044,
      "step": 1
    },
    {
      "epoch": 0.1391304347826087,
      "grad_norm": 0.2108100801706314,
      "learning_rate": 4e-05,
      "loss": 0.7833,
      "step": 2
    },
    {
      "epoch": 0.20869565217391303,
      "grad_norm": 0.1993255764245987,
      "learning_rate": 6e-05,
      "loss": 0.7999,
      "step": 3
    },
    {
      "epoch": 0.2782608695652174,
      "grad_norm": 0.28567054867744446,
      "learning_rate": 8e-05,
      "loss": 0.8231,
      "step": 4
    },
    {
      "epoch": 0.2782608695652174,
      "eval_loss": 0.9139525294303894,
      "eval_runtime": 75.4634,
      "eval_samples_per_second": 2.067,
      "eval_steps_per_second": 1.034,
      "step": 4
    },
    {
      "epoch": 0.34782608695652173,
      "grad_norm": 0.2238488644361496,
      "learning_rate": 0.0001,
      "loss": 0.6855,
      "step": 5
    },
    {
      "epoch": 0.41739130434782606,
      "grad_norm": 0.248153418302536,
      "learning_rate": 0.00012,
      "loss": 0.9259,
      "step": 6
    },
    {
      "epoch": 0.48695652173913045,
      "grad_norm": 0.252511590719223,
      "learning_rate": 0.00014,
      "loss": 0.7668,
      "step": 7
    },
    {
      "epoch": 0.5565217391304348,
      "grad_norm": 0.25402677059173584,
      "learning_rate": 0.00016,
      "loss": 0.779,
      "step": 8
    },
    {
      "epoch": 0.5565217391304348,
      "eval_loss": 0.8034127354621887,
      "eval_runtime": 75.4244,
      "eval_samples_per_second": 2.068,
      "eval_steps_per_second": 1.034,
      "step": 8
    },
    {
      "epoch": 0.6260869565217392,
      "grad_norm": 0.1748448610305786,
      "learning_rate": 0.00018,
      "loss": 0.5345,
      "step": 9
    },
    {
      "epoch": 0.6956521739130435,
      "grad_norm": 0.18634748458862305,
      "learning_rate": 0.0002,
      "loss": 0.5699,
      "step": 10
    },
    {
      "epoch": 0.7652173913043478,
      "grad_norm": 0.21753615140914917,
      "learning_rate": 0.00019848077530122083,
      "loss": 0.5804,
      "step": 11
    },
    {
      "epoch": 0.8347826086956521,
      "grad_norm": 0.23333732783794403,
      "learning_rate": 0.00019396926207859084,
      "loss": 0.545,
      "step": 12
    },
    {
      "epoch": 0.8347826086956521,
      "eval_loss": 0.648797869682312,
      "eval_runtime": 75.308,
      "eval_samples_per_second": 2.071,
      "eval_steps_per_second": 1.036,
      "step": 12
    },
    {
      "epoch": 0.9043478260869565,
      "grad_norm": 0.20313376188278198,
      "learning_rate": 0.00018660254037844388,
      "loss": 0.4392,
      "step": 13
    },
    {
      "epoch": 0.9739130434782609,
      "grad_norm": 0.20026056468486786,
      "learning_rate": 0.0001766044443118978,
      "loss": 0.38,
      "step": 14
    },
    {
      "epoch": 1.0347826086956522,
      "grad_norm": 0.153587207198143,
      "learning_rate": 0.00016427876096865394,
      "loss": 0.3177,
      "step": 15
    },
    {
      "epoch": 1.1043478260869566,
      "grad_norm": 0.16564497351646423,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.3712,
      "step": 16
    },
    {
      "epoch": 1.1043478260869566,
      "eval_loss": 0.5825337767601013,
      "eval_runtime": 75.4662,
      "eval_samples_per_second": 2.067,
      "eval_steps_per_second": 1.034,
      "step": 16
    },
    {
      "epoch": 1.1739130434782608,
      "grad_norm": 0.1881798803806305,
      "learning_rate": 0.00013420201433256689,
      "loss": 0.3357,
      "step": 17
    },
    {
      "epoch": 1.2434782608695651,
      "grad_norm": 0.21584919095039368,
      "learning_rate": 0.00011736481776669306,
      "loss": 0.3875,
      "step": 18
    },
    {
      "epoch": 1.3130434782608695,
      "grad_norm": 0.1652679145336151,
      "learning_rate": 0.0001,
      "loss": 0.287,
      "step": 19
    },
    {
      "epoch": 1.382608695652174,
      "grad_norm": 0.1664346307516098,
      "learning_rate": 8.263518223330697e-05,
      "loss": 0.336,
      "step": 20
    },
    {
      "epoch": 1.382608695652174,
      "eval_loss": 0.5491526126861572,
      "eval_runtime": 75.3653,
      "eval_samples_per_second": 2.07,
      "eval_steps_per_second": 1.035,
      "step": 20
    },
    {
      "epoch": 1.4521739130434783,
      "grad_norm": 0.15922914445400238,
      "learning_rate": 6.579798566743314e-05,
      "loss": 0.2959,
      "step": 21
    },
    {
      "epoch": 1.5217391304347827,
      "grad_norm": 0.14202940464019775,
      "learning_rate": 5.000000000000002e-05,
      "loss": 0.2983,
      "step": 22
    },
    {
      "epoch": 1.591304347826087,
      "grad_norm": 0.1265108287334442,
      "learning_rate": 3.5721239031346066e-05,
      "loss": 0.247,
      "step": 23
    },
    {
      "epoch": 1.6608695652173913,
      "grad_norm": 0.15941433608531952,
      "learning_rate": 2.339555568810221e-05,
      "loss": 0.3231,
      "step": 24
    },
    {
      "epoch": 1.6608695652173913,
      "eval_loss": 0.537909984588623,
      "eval_runtime": 75.2911,
      "eval_samples_per_second": 2.072,
      "eval_steps_per_second": 1.036,
      "step": 24
    },
    {
      "epoch": 1.7304347826086957,
      "grad_norm": 0.1614454984664917,
      "learning_rate": 1.339745962155613e-05,
      "loss": 0.311,
      "step": 25
    },
    {
      "epoch": 1.8,
      "grad_norm": 0.13931332528591156,
      "learning_rate": 6.030737921409169e-06,
      "loss": 0.2731,
      "step": 26
    },
    {
      "epoch": 1.8695652173913042,
      "grad_norm": 0.15171031653881073,
      "learning_rate": 1.5192246987791981e-06,
      "loss": 0.2874,
      "step": 27
    },
    {
      "epoch": 1.9391304347826086,
      "grad_norm": 0.14994187653064728,
      "learning_rate": 0.0,
      "loss": 0.2729,
      "step": 28
    },
    {
      "epoch": 1.9391304347826086,
      "eval_loss": 0.5353411436080933,
      "eval_runtime": 75.3284,
      "eval_samples_per_second": 2.071,
      "eval_steps_per_second": 1.035,
      "step": 28
    }
  ],
  "logging_steps": 1,
  "max_steps": 28,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 14,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.149946891848909e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}