{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.0547945205479452,
  "eval_steps": 1,
  "global_step": 1,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0547945205479452,
      "grad_norm": 17.049646377563477,
      "learning_rate": 4e-05,
      "loss": 6.851,
      "step": 1
    },
    {
      "epoch": 0.0547945205479452,
      "eval_NLI-v2_cosine_accuracy": 1.0,
      "eval_NLI-v2_dot_accuracy": 0.125,
      "eval_NLI-v2_euclidean_accuracy": 1.0,
      "eval_NLI-v2_manhattan_accuracy": 1.0,
      "eval_NLI-v2_max_accuracy": 1.0,
      "eval_VitaminC_cosine_accuracy": 0.55078125,
      "eval_VitaminC_cosine_accuracy_threshold": 0.9466925859451294,
      "eval_VitaminC_cosine_ap": 0.5153192822743842,
      "eval_VitaminC_cosine_f1": 0.6525198938992042,
      "eval_VitaminC_cosine_f1_threshold": 0.49584439396858215,
      "eval_VitaminC_cosine_precision": 0.484251968503937,
      "eval_VitaminC_cosine_recall": 1.0,
      "eval_VitaminC_dot_accuracy": 0.55078125,
      "eval_VitaminC_dot_accuracy_threshold": 417.46221923828125,
      "eval_VitaminC_dot_ap": 0.5127659553715838,
      "eval_VitaminC_dot_f1": 0.6525198938992042,
      "eval_VitaminC_dot_f1_threshold": 199.873291015625,
      "eval_VitaminC_dot_precision": 0.484251968503937,
      "eval_VitaminC_dot_recall": 1.0,
      "eval_VitaminC_euclidean_accuracy": 0.546875,
      "eval_VitaminC_euclidean_accuracy_threshold": 6.84520149230957,
      "eval_VitaminC_euclidean_ap": 0.5128797056139347,
      "eval_VitaminC_euclidean_f1": 0.6525198938992042,
      "eval_VitaminC_euclidean_f1_threshold": 20.29159164428711,
      "eval_VitaminC_euclidean_precision": 0.484251968503937,
      "eval_VitaminC_euclidean_recall": 1.0,
      "eval_VitaminC_manhattan_accuracy": 0.546875,
      "eval_VitaminC_manhattan_accuracy_threshold": 117.19680786132812,
      "eval_VitaminC_manhattan_ap": 0.5178540867523715,
      "eval_VitaminC_manhattan_f1": 0.6542553191489362,
      "eval_VitaminC_manhattan_f1_threshold": 292.8346252441406,
      "eval_VitaminC_manhattan_precision": 0.48616600790513836,
      "eval_VitaminC_manhattan_recall": 1.0,
      "eval_VitaminC_max_accuracy": 0.55078125,
      "eval_VitaminC_max_accuracy_threshold": 417.46221923828125,
      "eval_VitaminC_max_ap": 0.5178540867523715,
      "eval_VitaminC_max_f1": 0.6542553191489362,
      "eval_VitaminC_max_f1_threshold": 292.8346252441406,
      "eval_VitaminC_max_precision": 0.48616600790513836,
      "eval_VitaminC_max_recall": 1.0,
      "eval_sequential_score": 0.5178540867523715,
      "eval_sts-test_pearson_cosine": 0.01688864747186382,
      "eval_sts-test_pearson_dot": 0.16020781347065607,
      "eval_sts-test_pearson_euclidean": 0.02427633810266949,
      "eval_sts-test_pearson_manhattan": 0.05915868944955206,
      "eval_sts-test_pearson_max": 0.16020781347065607,
      "eval_sts-test_spearman_cosine": 0.0704767781934101,
      "eval_sts-test_spearman_dot": 0.19413812590183685,
      "eval_sts-test_spearman_euclidean": 0.0506775286593327,
      "eval_sts-test_spearman_manhattan": 0.07640924890718144,
      "eval_sts-test_spearman_max": 0.19413812590183685,
      "eval_vitaminc-pairs_loss": 2.7278947830200195,
      "eval_vitaminc-pairs_runtime": 1.5228,
      "eval_vitaminc-pairs_samples_per_second": 70.92,
      "eval_vitaminc-pairs_steps_per_second": 1.313,
      "step": 1
    },
    {
      "epoch": 0.0547945205479452,
      "eval_negation-triplets_loss": 5.259257793426514,
      "eval_negation-triplets_runtime": 0.3007,
      "eval_negation-triplets_samples_per_second": 212.85,
      "eval_negation-triplets_steps_per_second": 3.326,
      "step": 1
    },
    {
      "epoch": 0.0547945205479452,
      "eval_scitail-pairs-pos_loss": 1.9180195331573486,
      "eval_scitail-pairs-pos_runtime": 0.4022,
      "eval_scitail-pairs-pos_samples_per_second": 134.25,
      "eval_scitail-pairs-pos_steps_per_second": 2.486,
      "step": 1
    },
    {
      "epoch": 0.0547945205479452,
      "eval_xsum-pairs_loss": 6.389987945556641,
      "eval_xsum-pairs_runtime": 3.3036,
      "eval_xsum-pairs_samples_per_second": 38.746,
      "eval_xsum-pairs_steps_per_second": 0.605,
      "step": 1
    },
    {
      "epoch": 0.0547945205479452,
      "eval_sciq_pairs_loss": 1.1206029653549194,
      "eval_sciq_pairs_runtime": 3.3516,
      "eval_sciq_pairs_samples_per_second": 38.191,
      "eval_sciq_pairs_steps_per_second": 0.597,
      "step": 1
    },
    {
      "epoch": 0.0547945205479452,
      "eval_qasc_pairs_loss": 7.901282787322998,
      "eval_qasc_pairs_runtime": 0.6747,
      "eval_qasc_pairs_samples_per_second": 189.709,
      "eval_qasc_pairs_steps_per_second": 2.964,
      "step": 1
    },
    {
      "epoch": 0.0547945205479452,
      "eval_openbookqa_pairs_loss": 4.747707366943359,
      "eval_openbookqa_pairs_runtime": 0.5849,
      "eval_openbookqa_pairs_samples_per_second": 218.847,
      "eval_openbookqa_pairs_steps_per_second": 3.419,
      "step": 1
    },
    {
      "epoch": 0.0547945205479452,
      "eval_msmarco_pairs_loss": 10.60707950592041,
      "eval_msmarco_pairs_runtime": 1.2013,
      "eval_msmarco_pairs_samples_per_second": 106.551,
      "eval_msmarco_pairs_steps_per_second": 1.665,
      "step": 1
    },
    {
      "epoch": 0.0547945205479452,
      "eval_nq_pairs_loss": 10.446059226989746,
      "eval_nq_pairs_runtime": 2.7595,
      "eval_nq_pairs_samples_per_second": 46.385,
      "eval_nq_pairs_steps_per_second": 0.725,
      "step": 1
    },
    {
      "epoch": 0.0547945205479452,
      "eval_trivia_pairs_loss": 7.870224952697754,
      "eval_trivia_pairs_runtime": 3.9907,
      "eval_trivia_pairs_samples_per_second": 29.819,
      "eval_trivia_pairs_steps_per_second": 0.501,
      "step": 1
    },
    {
      "epoch": 0.0547945205479452,
      "eval_gooaq_pairs_loss": 8.126296997070312,
      "eval_gooaq_pairs_runtime": 0.8871,
      "eval_gooaq_pairs_samples_per_second": 144.296,
      "eval_gooaq_pairs_steps_per_second": 2.255,
      "step": 1
    },
    {
      "epoch": 0.0547945205479452,
      "eval_paws-pos_loss": 2.2177822589874268,
      "eval_paws-pos_runtime": 0.69,
      "eval_paws-pos_samples_per_second": 185.509,
      "eval_paws-pos_steps_per_second": 2.899,
      "step": 1
    }
  ],
  "logging_steps": 1,
  "max_steps": 2,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 160,
  "trial_name": null,
  "trial_params": null
}