{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 7.0,
"eval_steps": 500,
"global_step": 126,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.5555555555555556,
"grad_norm": 0.5622067451477051,
"learning_rate": 4.922693215572695e-05,
"loss": 2.359,
"num_input_tokens_seen": 81920,
"step": 10
},
{
"epoch": 1.1111111111111112,
"grad_norm": 0.5337382555007935,
"learning_rate": 4.6955539334255716e-05,
"loss": 2.3166,
"num_input_tokens_seen": 163840,
"step": 20
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.6496122479438782,
"learning_rate": 4.332629679574566e-05,
"loss": 2.2205,
"num_input_tokens_seen": 245760,
"step": 30
},
{
"epoch": 2.2222222222222223,
"grad_norm": 0.6310074329376221,
"learning_rate": 3.856365659664399e-05,
"loss": 2.159,
"num_input_tokens_seen": 327680,
"step": 40
},
{
"epoch": 2.7777777777777777,
"grad_norm": 1.2455250024795532,
"learning_rate": 3.2962166256292113e-05,
"loss": 2.0625,
"num_input_tokens_seen": 409600,
"step": 50
},
{
"epoch": 3.3333333333333335,
"grad_norm": 2.0558948516845703,
"learning_rate": 2.686825233966061e-05,
"loss": 2.0294,
"num_input_tokens_seen": 491520,
"step": 60
},
{
"epoch": 3.888888888888889,
"grad_norm": 1.1326656341552734,
"learning_rate": 2.0658795558326743e-05,
"loss": 1.9556,
"num_input_tokens_seen": 573440,
"step": 70
},
{
"epoch": 4.444444444444445,
"grad_norm": 1.0931998491287231,
"learning_rate": 1.4717822421734718e-05,
"loss": 1.8462,
"num_input_tokens_seen": 655360,
"step": 80
},
{
"epoch": 5.0,
"grad_norm": 1.1948561668395996,
"learning_rate": 9.412754953531663e-06,
"loss": 1.8544,
"num_input_tokens_seen": 737280,
"step": 90
},
{
"epoch": 5.555555555555555,
"grad_norm": 1.3811908960342407,
"learning_rate": 5.071687319426946e-06,
"loss": 1.7957,
"num_input_tokens_seen": 819200,
"step": 100
},
{
"epoch": 6.111111111111111,
"grad_norm": 1.2763166427612305,
"learning_rate": 1.9630947032398067e-06,
"loss": 1.7244,
"num_input_tokens_seen": 901120,
"step": 110
},
{
"epoch": 6.666666666666667,
"grad_norm": 1.3015738725662231,
"learning_rate": 2.7922934437178695e-07,
"loss": 1.7368,
"num_input_tokens_seen": 983040,
"step": 120
},
{
"epoch": 7.0,
"num_input_tokens_seen": 1032192,
"step": 126,
"total_flos": 4.69986584863703e+16,
"train_loss": 1.9939293407258534,
"train_runtime": 612.6473,
"train_samples_per_second": 0.206,
"train_steps_per_second": 0.206
}
],
"logging_steps": 10,
"max_steps": 126,
"num_input_tokens_seen": 1032192,
"num_train_epochs": 7,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.69986584863703e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}