diff --git a/checkpoint-100/README.md b/checkpoint-100/README.md new file mode 100644 index 0000000000000000000000000000000000000000..de605040afe981efd7347112a2bd3d9ddfb19684 --- /dev/null +++ b/checkpoint-100/README.md @@ -0,0 +1,204 @@ +--- +library_name: peft +base_model: microsoft/phi-2 +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. + +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
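+
+As a rough illustration of how the fields below combine (the calculator also applies provider-specific overheads such as datacenter PUE), emissions scale approximately as power draw × hours × regional carbon intensity. Every number in this sketch is an assumed placeholder, not a measurement from this training run:
+
+```python
+# Back-of-the-envelope estimate in the spirit of Lacoste et al. (2019).
+# All values below are illustrative assumptions, not measured for this run.
+gpu_power_kw = 0.3        # hardware type -> approximate draw of one ~300 W GPU
+hours_used = 2.0          # hours used (wall-clock training time)
+carbon_intensity = 0.4    # kg CO2e per kWh for the compute region (varies widely)
+
+kg_co2e = gpu_power_kw * hours_used * carbon_intensity
+print(f"Estimated emissions: {kg_co2e:.2f} kg CO2e")
+```
+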
+ +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] + + +### Framework versions + +- PEFT 0.7.2.dev0 \ No newline at end of file diff --git a/checkpoint-100/adapter_config.json b/checkpoint-100/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2db55fa42720f3e778398fabefd1c209426aaf4 --- /dev/null +++ b/checkpoint-100/adapter_config.json @@ -0,0 +1,28 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "microsoft/phi-2", + "bias": "none", + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 64, + "lora_dropout": 0.05, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 32, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "fc1", + "fc2", + "Wqkv" + ], + "task_type": "CAUSAL_LM", + "use_rslora": false +} \ No newline at end of file diff --git a/checkpoint-100/adapter_model.safetensors b/checkpoint-100/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..a9525c4e1b8939404967d8b0d66dfbb246c143a1 --- /dev/null +++ b/checkpoint-100/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3096de35686223fab2a9df030e7bf67449a2ac73a4893f1fe37c065bf658bb4f +size 104873776 diff --git a/checkpoint-100/optimizer.pt b/checkpoint-100/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..b379e597d26cfe2360761f5ec29885e73bad2415 --- /dev/null +++ b/checkpoint-100/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:422703e3cc3f3e7cb340560e547879521dc77bcf18172dadea4533f52e051921 +size 52680378 diff --git a/checkpoint-100/rng_state.pth b/checkpoint-100/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..93591ce6e4601003aebf0928941ffb041cfa68a6 --- /dev/null +++ b/checkpoint-100/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22877d51b3681bdaead8e3db9525f16f1d0bcc5a62cc8e2e3baa2effc2b68289 +size 14244 diff --git a/checkpoint-100/scheduler.pt b/checkpoint-100/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..139e18dbc19f81f3bc6380ac8ae84c4fe54c0467 --- /dev/null +++ b/checkpoint-100/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92441db39511d89dce542ecb6aba421c47cfcefd7a75d4164e2e911229478ceb +size 1064 diff --git a/checkpoint-100/trainer_state.json b/checkpoint-100/trainer_state.json new file mode 100644 index 
0000000000000000000000000000000000000000..6a32698a9801c2172cbdd2b3725f307a67d41fa0 --- /dev/null +++ b/checkpoint-100/trainer_state.json @@ -0,0 +1,77 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.13333333333333333, + "eval_steps": 25, + "global_step": 100, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.03, + "learning_rate": 2.3797595190380762e-05, + "loss": 3.5104, + "step": 25 + }, + { + "epoch": 0.03, + "eval_loss": 2.643505334854126, + "eval_runtime": 13.8992, + "eval_samples_per_second": 17.987, + "eval_steps_per_second": 2.302, + "step": 25 + }, + { + "epoch": 0.07, + "learning_rate": 2.2545090180360722e-05, + "loss": 2.2052, + "step": 50 + }, + { + "epoch": 0.07, + "eval_loss": 1.966366171836853, + "eval_runtime": 13.6092, + "eval_samples_per_second": 18.37, + "eval_steps_per_second": 2.351, + "step": 50 + }, + { + "epoch": 0.1, + "learning_rate": 2.1292585170340683e-05, + "loss": 1.81, + "step": 75 + }, + { + "epoch": 0.1, + "eval_loss": 1.8134992122650146, + "eval_runtime": 13.5125, + "eval_samples_per_second": 18.501, + "eval_steps_per_second": 2.368, + "step": 75 + }, + { + "epoch": 0.13, + "learning_rate": 2.0040080160320643e-05, + "loss": 1.705, + "step": 100 + }, + { + "epoch": 0.13, + "eval_loss": 1.7801309823989868, + "eval_runtime": 13.4868, + "eval_samples_per_second": 18.537, + "eval_steps_per_second": 2.373, + "step": 100 + } + ], + "logging_steps": 25, + "max_steps": 500, + "num_input_tokens_seen": 0, + "num_train_epochs": 1, + "save_steps": 25, + "total_flos": 1643413241856000.0, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +} diff --git a/checkpoint-100/training_args.bin b/checkpoint-100/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..325e70156f50a1004dd17f03c6a72eae34ab817a --- /dev/null +++ b/checkpoint-100/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f539f4dad423afd20fdebce05e8744780e9ed30b55cfb57c0e5421e453ed0ea1 +size 4728 diff --git a/checkpoint-125/README.md b/checkpoint-125/README.md new file mode 100644 index 0000000000000000000000000000000000000000..de605040afe981efd7347112a2bd3d9ddfb19684 --- /dev/null +++ b/checkpoint-125/README.md @@ -0,0 +1,204 @@ +--- +library_name: peft +base_model: microsoft/phi-2 +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. 
+ +## How to Get Started with the Model + +Use the code below to get started with the model. + +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). + +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] + + +### Framework versions + +- PEFT 0.7.2.dev0 \ No newline at end of file diff --git a/checkpoint-125/adapter_config.json b/checkpoint-125/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2db55fa42720f3e778398fabefd1c209426aaf4 --- /dev/null +++ b/checkpoint-125/adapter_config.json @@ -0,0 +1,28 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "microsoft/phi-2", + "bias": "none", + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 64, + "lora_dropout": 0.05, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 32, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "fc1", + "fc2", + "Wqkv" + ], + "task_type": "CAUSAL_LM", + "use_rslora": false +} \ No newline at end of file diff --git a/checkpoint-125/adapter_model.safetensors b/checkpoint-125/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..2cfe137c6b31ec6df1379f6bfb037c01ed819e27 --- /dev/null +++ b/checkpoint-125/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d267df191f5204fa6e0bc6290151f2c310160b72799c61cfbd0d360f639e519 +size 104873776 diff --git a/checkpoint-125/optimizer.pt b/checkpoint-125/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..85061802955c642022acb8c1ba2767ba1f4a145f --- /dev/null +++ b/checkpoint-125/optimizer.pt @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:9ffd85289e52d0a5109094693a96cdb35494ab697d265e600c295f7bb5c8766d +size 52680378 diff --git a/checkpoint-125/rng_state.pth b/checkpoint-125/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..7ec4973439d8f60948a487b6b1fe1ff2c486153e --- /dev/null +++ b/checkpoint-125/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec4355ca8c7a3745700fe5c34fd421e8d6eb401669abfe1d96bfd6ad0442124a +size 14244 diff --git a/checkpoint-125/scheduler.pt b/checkpoint-125/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..8839fea0df6b09211e891d2bb60a7e099885c8bf --- /dev/null +++ b/checkpoint-125/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ca61dc3978f7d5400fcdf426e1ee63051e419549109531508ba04513d698952 +size 1064 diff --git a/checkpoint-125/trainer_state.json b/checkpoint-125/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..b4fd2ab420b9bc9107b1d2b4ea5b5323072b5549 --- /dev/null +++ b/checkpoint-125/trainer_state.json @@ -0,0 +1,91 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.16666666666666666, + "eval_steps": 25, + "global_step": 125, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.03, + "learning_rate": 2.3797595190380762e-05, + "loss": 3.5104, + "step": 25 + }, + { + "epoch": 0.03, + "eval_loss": 2.643505334854126, + "eval_runtime": 13.8992, + "eval_samples_per_second": 17.987, + "eval_steps_per_second": 2.302, + "step": 25 + }, + { + "epoch": 0.07, + "learning_rate": 2.2545090180360722e-05, + "loss": 2.2052, + "step": 50 + }, + { + "epoch": 0.07, + "eval_loss": 1.966366171836853, + "eval_runtime": 13.6092, + "eval_samples_per_second": 18.37, + "eval_steps_per_second": 2.351, + "step": 50 + }, + { + "epoch": 0.1, + "learning_rate": 2.1292585170340683e-05, + "loss": 1.81, + "step": 75 + }, + { + "epoch": 0.1, + "eval_loss": 1.8134992122650146, + "eval_runtime": 13.5125, + "eval_samples_per_second": 18.501, + "eval_steps_per_second": 2.368, + "step": 75 + }, + { + "epoch": 0.13, + "learning_rate": 2.0040080160320643e-05, + "loss": 1.705, + "step": 100 + }, + { + "epoch": 0.13, + "eval_loss": 1.7801309823989868, + "eval_runtime": 13.4868, + "eval_samples_per_second": 18.537, + "eval_steps_per_second": 2.373, + "step": 100 + }, + { + "epoch": 0.17, + "learning_rate": 1.87875751503006e-05, + "loss": 1.6288, + "step": 125 + }, + { + "epoch": 0.17, + "eval_loss": 1.7698311805725098, + "eval_runtime": 13.6256, + "eval_samples_per_second": 18.348, + "eval_steps_per_second": 2.349, + "step": 125 + } + ], + "logging_steps": 25, + "max_steps": 500, + "num_input_tokens_seen": 0, + "num_train_epochs": 1, + "save_steps": 25, + "total_flos": 2054266552320000.0, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +} diff --git a/checkpoint-125/training_args.bin b/checkpoint-125/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..325e70156f50a1004dd17f03c6a72eae34ab817a --- /dev/null +++ b/checkpoint-125/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f539f4dad423afd20fdebce05e8744780e9ed30b55cfb57c0e5421e453ed0ea1 +size 4728 diff --git a/checkpoint-150/README.md b/checkpoint-150/README.md new file mode 100644 index 0000000000000000000000000000000000000000..de605040afe981efd7347112a2bd3d9ddfb19684 
--- /dev/null +++ b/checkpoint-150/README.md @@ -0,0 +1,204 @@ +--- +library_name: peft +base_model: microsoft/phi-2 +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. + +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
+ +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] + + +### Framework versions + +- PEFT 0.7.2.dev0 \ No newline at end of file diff --git a/checkpoint-150/adapter_config.json b/checkpoint-150/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2db55fa42720f3e778398fabefd1c209426aaf4 --- /dev/null +++ b/checkpoint-150/adapter_config.json @@ -0,0 +1,28 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "microsoft/phi-2", + "bias": "none", + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 64, + "lora_dropout": 0.05, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 32, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "fc1", + "fc2", + "Wqkv" + ], + "task_type": "CAUSAL_LM", + "use_rslora": false +} \ No newline at end of file diff --git a/checkpoint-150/adapter_model.safetensors b/checkpoint-150/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..63c7c7e5ec7bd2dea3b0ec425624968490f0abcf --- /dev/null +++ b/checkpoint-150/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52c7a97cd0490630bcc3f78318a16cd1c68aa3173ccd53bff0429227e40aa4b0 +size 104873776 diff --git a/checkpoint-150/optimizer.pt b/checkpoint-150/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..0236107803be5706a1131210d502164034b78263 --- /dev/null +++ b/checkpoint-150/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bce733ec605ce872820df65be03f7e8b9115ed92ebf8e3eed002a00e69e0cf30 +size 52680378 diff --git a/checkpoint-150/rng_state.pth b/checkpoint-150/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..2a9ba018d0bc7ae78dd256cfd41395d44d9f18a8 --- /dev/null +++ b/checkpoint-150/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4d8e33fda55628eabbc66fdb575198c2ec24d18faf23efa712c2864445c109c +size 14244 diff --git a/checkpoint-150/scheduler.pt b/checkpoint-150/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..152f9f4f132b8491e295d73e9c1f754aa52ad8ea --- /dev/null +++ b/checkpoint-150/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ccceb5409a2f589d3f6c1db287698febf9573d0daa3ed1fb097a927fa91b6420 +size 1064 diff --git a/checkpoint-150/trainer_state.json b/checkpoint-150/trainer_state.json new file mode 100644 index 
0000000000000000000000000000000000000000..b2efd6f6588e28d790fd483312e302f8301bed80 --- /dev/null +++ b/checkpoint-150/trainer_state.json @@ -0,0 +1,105 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.2, + "eval_steps": 25, + "global_step": 150, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.03, + "learning_rate": 2.3797595190380762e-05, + "loss": 3.5104, + "step": 25 + }, + { + "epoch": 0.03, + "eval_loss": 2.643505334854126, + "eval_runtime": 13.8992, + "eval_samples_per_second": 17.987, + "eval_steps_per_second": 2.302, + "step": 25 + }, + { + "epoch": 0.07, + "learning_rate": 2.2545090180360722e-05, + "loss": 2.2052, + "step": 50 + }, + { + "epoch": 0.07, + "eval_loss": 1.966366171836853, + "eval_runtime": 13.6092, + "eval_samples_per_second": 18.37, + "eval_steps_per_second": 2.351, + "step": 50 + }, + { + "epoch": 0.1, + "learning_rate": 2.1292585170340683e-05, + "loss": 1.81, + "step": 75 + }, + { + "epoch": 0.1, + "eval_loss": 1.8134992122650146, + "eval_runtime": 13.5125, + "eval_samples_per_second": 18.501, + "eval_steps_per_second": 2.368, + "step": 75 + }, + { + "epoch": 0.13, + "learning_rate": 2.0040080160320643e-05, + "loss": 1.705, + "step": 100 + }, + { + "epoch": 0.13, + "eval_loss": 1.7801309823989868, + "eval_runtime": 13.4868, + "eval_samples_per_second": 18.537, + "eval_steps_per_second": 2.373, + "step": 100 + }, + { + "epoch": 0.17, + "learning_rate": 1.87875751503006e-05, + "loss": 1.6288, + "step": 125 + }, + { + "epoch": 0.17, + "eval_loss": 1.7698311805725098, + "eval_runtime": 13.6256, + "eval_samples_per_second": 18.348, + "eval_steps_per_second": 2.349, + "step": 125 + }, + { + "epoch": 0.2, + "learning_rate": 1.7535070140280564e-05, + "loss": 1.7262, + "step": 150 + }, + { + "epoch": 0.2, + "eval_loss": 1.7579054832458496, + "eval_runtime": 13.6761, + "eval_samples_per_second": 18.28, + "eval_steps_per_second": 2.34, + "step": 150 + } + ], + "logging_steps": 25, + "max_steps": 500, + "num_input_tokens_seen": 0, + "num_train_epochs": 1, + "save_steps": 25, + "total_flos": 2465119862784000.0, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +} diff --git a/checkpoint-150/training_args.bin b/checkpoint-150/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..325e70156f50a1004dd17f03c6a72eae34ab817a --- /dev/null +++ b/checkpoint-150/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f539f4dad423afd20fdebce05e8744780e9ed30b55cfb57c0e5421e453ed0ea1 +size 4728 diff --git a/checkpoint-175/README.md b/checkpoint-175/README.md new file mode 100644 index 0000000000000000000000000000000000000000..de605040afe981efd7347112a2bd3d9ddfb19684 --- /dev/null +++ b/checkpoint-175/README.md @@ -0,0 +1,204 @@ +--- +library_name: peft +base_model: microsoft/phi-2 +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More 
Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. + +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). + +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] + + +### Framework versions + +- PEFT 0.7.2.dev0 \ No newline at end of file diff --git a/checkpoint-175/adapter_config.json b/checkpoint-175/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2db55fa42720f3e778398fabefd1c209426aaf4 --- /dev/null +++ b/checkpoint-175/adapter_config.json @@ -0,0 +1,28 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "microsoft/phi-2", + "bias": "none", + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 64, + "lora_dropout": 0.05, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 32, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "fc1", + "fc2", + "Wqkv" + ], + "task_type": "CAUSAL_LM", + "use_rslora": false +} \ No newline at end of file diff --git a/checkpoint-175/adapter_model.safetensors b/checkpoint-175/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b50ab5b6fd2da313242c8c11ba2f1e5815860468 --- /dev/null +++ 
b/checkpoint-175/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c0736dadd7215d1c083e40079cb5184f8fed61fd3bd4b0130058290adf43991 +size 104873776 diff --git a/checkpoint-175/optimizer.pt b/checkpoint-175/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..399a589796711dc053795c7468ef361daffe6cee --- /dev/null +++ b/checkpoint-175/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:598dc68308de694ee6ab307ea90d0e0fd54cfd96e76263b9a113659d2421c331 +size 52680378 diff --git a/checkpoint-175/rng_state.pth b/checkpoint-175/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..c154917c1896e8a7bde9571b7af47448e1901bda --- /dev/null +++ b/checkpoint-175/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0bb84e6749c2f3a01aa93bb663c601ce6a7370ff07da3ea716dc140733bb10e0 +size 14244 diff --git a/checkpoint-175/scheduler.pt b/checkpoint-175/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..7017018af7c4fe5e76dc817e24623e20e1d08ad5 --- /dev/null +++ b/checkpoint-175/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a34410ad021defcd5908748f1e45902861a7469af78faa748a15a11fa9749909 +size 1064 diff --git a/checkpoint-175/trainer_state.json b/checkpoint-175/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..5713081730c04d910ca69860b8e3bc0fa798ab0f --- /dev/null +++ b/checkpoint-175/trainer_state.json @@ -0,0 +1,119 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.23333333333333334, + "eval_steps": 25, + "global_step": 175, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.03, + "learning_rate": 2.3797595190380762e-05, + "loss": 3.5104, + "step": 25 + }, + { + "epoch": 0.03, + "eval_loss": 2.643505334854126, + "eval_runtime": 13.8992, + "eval_samples_per_second": 17.987, + "eval_steps_per_second": 2.302, + "step": 25 + }, + { + "epoch": 0.07, + "learning_rate": 2.2545090180360722e-05, + "loss": 2.2052, + "step": 50 + }, + { + "epoch": 0.07, + "eval_loss": 1.966366171836853, + "eval_runtime": 13.6092, + "eval_samples_per_second": 18.37, + "eval_steps_per_second": 2.351, + "step": 50 + }, + { + "epoch": 0.1, + "learning_rate": 2.1292585170340683e-05, + "loss": 1.81, + "step": 75 + }, + { + "epoch": 0.1, + "eval_loss": 1.8134992122650146, + "eval_runtime": 13.5125, + "eval_samples_per_second": 18.501, + "eval_steps_per_second": 2.368, + "step": 75 + }, + { + "epoch": 0.13, + "learning_rate": 2.0040080160320643e-05, + "loss": 1.705, + "step": 100 + }, + { + "epoch": 0.13, + "eval_loss": 1.7801309823989868, + "eval_runtime": 13.4868, + "eval_samples_per_second": 18.537, + "eval_steps_per_second": 2.373, + "step": 100 + }, + { + "epoch": 0.17, + "learning_rate": 1.87875751503006e-05, + "loss": 1.6288, + "step": 125 + }, + { + "epoch": 0.17, + "eval_loss": 1.7698311805725098, + "eval_runtime": 13.6256, + "eval_samples_per_second": 18.348, + "eval_steps_per_second": 2.349, + "step": 125 + }, + { + "epoch": 0.2, + "learning_rate": 1.7535070140280564e-05, + "loss": 1.7262, + "step": 150 + }, + { + "epoch": 0.2, + "eval_loss": 1.7579054832458496, + "eval_runtime": 13.6761, + "eval_samples_per_second": 18.28, + "eval_steps_per_second": 2.34, + "step": 150 + }, + { + "epoch": 0.23, + "learning_rate": 1.628256513026052e-05, + "loss": 
1.6657, + "step": 175 + }, + { + "epoch": 0.23, + "eval_loss": 1.7578012943267822, + "eval_runtime": 13.4284, + "eval_samples_per_second": 18.617, + "eval_steps_per_second": 2.383, + "step": 175 + } + ], + "logging_steps": 25, + "max_steps": 500, + "num_input_tokens_seen": 0, + "num_train_epochs": 1, + "save_steps": 25, + "total_flos": 2875973173248000.0, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +} diff --git a/checkpoint-175/training_args.bin b/checkpoint-175/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..325e70156f50a1004dd17f03c6a72eae34ab817a --- /dev/null +++ b/checkpoint-175/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f539f4dad423afd20fdebce05e8744780e9ed30b55cfb57c0e5421e453ed0ea1 +size 4728 diff --git a/checkpoint-200/README.md b/checkpoint-200/README.md new file mode 100644 index 0000000000000000000000000000000000000000..de605040afe981efd7347112a2bd3d9ddfb19684 --- /dev/null +++ b/checkpoint-200/README.md @@ -0,0 +1,204 @@ +--- +library_name: peft +base_model: microsoft/phi-2 +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. + +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
+ +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] + + +### Framework versions + +- PEFT 0.7.2.dev0 \ No newline at end of file diff --git a/checkpoint-200/adapter_config.json b/checkpoint-200/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2db55fa42720f3e778398fabefd1c209426aaf4 --- /dev/null +++ b/checkpoint-200/adapter_config.json @@ -0,0 +1,28 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "microsoft/phi-2", + "bias": "none", + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 64, + "lora_dropout": 0.05, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 32, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "fc1", + "fc2", + "Wqkv" + ], + "task_type": "CAUSAL_LM", + "use_rslora": false +} \ No newline at end of file diff --git a/checkpoint-200/adapter_model.safetensors b/checkpoint-200/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..73fa1154ffb131c2d22d0a3ce4992cb170ba2968 --- /dev/null +++ b/checkpoint-200/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:304745872e892162b79b6c9d51278a1ce2e64c0c8a8ba5bb03607fff8192487c +size 104873776 diff --git a/checkpoint-200/optimizer.pt b/checkpoint-200/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..677f1d39f3f31d3315b9984df1dae8e81af8e916 --- /dev/null +++ b/checkpoint-200/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b95323bd86089360bbe555ddbebf5fa577e764e9b9e45c24ed0925c0c12e61d +size 52680378 diff --git a/checkpoint-200/rng_state.pth b/checkpoint-200/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..5f932a1ecc00b92514c72c1f064d6130fed2c387 --- /dev/null +++ b/checkpoint-200/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d72c9a4ea7ce611e0a9dafeeecea09728d0b5d46ca0595b9e458403d106e033d +size 14244 diff --git a/checkpoint-200/scheduler.pt b/checkpoint-200/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..bc44f1e1f3a607753ea09782250be2d5370d4f17 --- /dev/null +++ b/checkpoint-200/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:065a00dd19cd52a6fcd328af6948b9b16901e53c55bdcdbac2210639d7a19c59 +size 1064 diff --git a/checkpoint-200/trainer_state.json b/checkpoint-200/trainer_state.json new file mode 100644 index 
0000000000000000000000000000000000000000..d0e10b0bf43af16e1f2b79abe6729d0be78e1e04 --- /dev/null +++ b/checkpoint-200/trainer_state.json @@ -0,0 +1,133 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.26666666666666666, + "eval_steps": 25, + "global_step": 200, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.03, + "learning_rate": 2.3797595190380762e-05, + "loss": 3.5104, + "step": 25 + }, + { + "epoch": 0.03, + "eval_loss": 2.643505334854126, + "eval_runtime": 13.8992, + "eval_samples_per_second": 17.987, + "eval_steps_per_second": 2.302, + "step": 25 + }, + { + "epoch": 0.07, + "learning_rate": 2.2545090180360722e-05, + "loss": 2.2052, + "step": 50 + }, + { + "epoch": 0.07, + "eval_loss": 1.966366171836853, + "eval_runtime": 13.6092, + "eval_samples_per_second": 18.37, + "eval_steps_per_second": 2.351, + "step": 50 + }, + { + "epoch": 0.1, + "learning_rate": 2.1292585170340683e-05, + "loss": 1.81, + "step": 75 + }, + { + "epoch": 0.1, + "eval_loss": 1.8134992122650146, + "eval_runtime": 13.5125, + "eval_samples_per_second": 18.501, + "eval_steps_per_second": 2.368, + "step": 75 + }, + { + "epoch": 0.13, + "learning_rate": 2.0040080160320643e-05, + "loss": 1.705, + "step": 100 + }, + { + "epoch": 0.13, + "eval_loss": 1.7801309823989868, + "eval_runtime": 13.4868, + "eval_samples_per_second": 18.537, + "eval_steps_per_second": 2.373, + "step": 100 + }, + { + "epoch": 0.17, + "learning_rate": 1.87875751503006e-05, + "loss": 1.6288, + "step": 125 + }, + { + "epoch": 0.17, + "eval_loss": 1.7698311805725098, + "eval_runtime": 13.6256, + "eval_samples_per_second": 18.348, + "eval_steps_per_second": 2.349, + "step": 125 + }, + { + "epoch": 0.2, + "learning_rate": 1.7535070140280564e-05, + "loss": 1.7262, + "step": 150 + }, + { + "epoch": 0.2, + "eval_loss": 1.7579054832458496, + "eval_runtime": 13.6761, + "eval_samples_per_second": 18.28, + "eval_steps_per_second": 2.34, + "step": 150 + }, + { + "epoch": 0.23, + "learning_rate": 1.628256513026052e-05, + "loss": 1.6657, + "step": 175 + }, + { + "epoch": 0.23, + "eval_loss": 1.7578012943267822, + "eval_runtime": 13.4284, + "eval_samples_per_second": 18.617, + "eval_steps_per_second": 2.383, + "step": 175 + }, + { + "epoch": 0.27, + "learning_rate": 1.5030060120240483e-05, + "loss": 1.6197, + "step": 200 + }, + { + "epoch": 0.27, + "eval_loss": 1.7506276369094849, + "eval_runtime": 13.5018, + "eval_samples_per_second": 18.516, + "eval_steps_per_second": 2.37, + "step": 200 + } + ], + "logging_steps": 25, + "max_steps": 500, + "num_input_tokens_seen": 0, + "num_train_epochs": 1, + "save_steps": 25, + "total_flos": 3286826483712000.0, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +} diff --git a/checkpoint-200/training_args.bin b/checkpoint-200/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..325e70156f50a1004dd17f03c6a72eae34ab817a --- /dev/null +++ b/checkpoint-200/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f539f4dad423afd20fdebce05e8744780e9ed30b55cfb57c0e5421e453ed0ea1 +size 4728 diff --git a/checkpoint-225/README.md b/checkpoint-225/README.md new file mode 100644 index 0000000000000000000000000000000000000000..de605040afe981efd7347112a2bd3d9ddfb19684 --- /dev/null +++ b/checkpoint-225/README.md @@ -0,0 +1,204 @@ +--- +library_name: peft +base_model: microsoft/phi-2 +--- + +# Model Card for Model ID + + + + + +## Model Details 
+ +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. + +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
+ +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] + + +### Framework versions + +- PEFT 0.7.2.dev0 \ No newline at end of file diff --git a/checkpoint-225/adapter_config.json b/checkpoint-225/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2db55fa42720f3e778398fabefd1c209426aaf4 --- /dev/null +++ b/checkpoint-225/adapter_config.json @@ -0,0 +1,28 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "microsoft/phi-2", + "bias": "none", + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 64, + "lora_dropout": 0.05, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 32, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "fc1", + "fc2", + "Wqkv" + ], + "task_type": "CAUSAL_LM", + "use_rslora": false +} \ No newline at end of file diff --git a/checkpoint-225/adapter_model.safetensors b/checkpoint-225/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..02ab59fb32f44b8556fa129477d1fc908eb47a02 --- /dev/null +++ b/checkpoint-225/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f8608d8013bffaac96dfe92fe2531d5014926ad2cc6bc981a6c621c89d7f362 +size 104873776 diff --git a/checkpoint-225/optimizer.pt b/checkpoint-225/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..f659410250bf17c703f7ea96f86c722e5dad4b17 --- /dev/null +++ b/checkpoint-225/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ee26299da84d26e1cf03de537d95758314f59bdd974fd6490785682ec41d88f +size 52680378 diff --git a/checkpoint-225/rng_state.pth b/checkpoint-225/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..0111468fcd4fdedc1141d5367d02b1caae84fe0c --- /dev/null +++ b/checkpoint-225/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48d812fc903c3d98e26eae4aef45e6909db2b35458229ab38200c356e7d8699f +size 14244 diff --git a/checkpoint-225/scheduler.pt b/checkpoint-225/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..9b4ae150db748b946163a2e361a6ac883d6379c7 --- /dev/null +++ b/checkpoint-225/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4de1314b44379c7eabbc14eef0d3bfe970787e4c6e6bc52105d5860db4cedfc +size 1064 diff --git a/checkpoint-225/trainer_state.json b/checkpoint-225/trainer_state.json new file mode 100644 index 
0000000000000000000000000000000000000000..e34c9f2c5768b2d2e6da53842b7f1705d27cc68c --- /dev/null +++ b/checkpoint-225/trainer_state.json @@ -0,0 +1,147 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.3, + "eval_steps": 25, + "global_step": 225, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.03, + "learning_rate": 2.3797595190380762e-05, + "loss": 3.5104, + "step": 25 + }, + { + "epoch": 0.03, + "eval_loss": 2.643505334854126, + "eval_runtime": 13.8992, + "eval_samples_per_second": 17.987, + "eval_steps_per_second": 2.302, + "step": 25 + }, + { + "epoch": 0.07, + "learning_rate": 2.2545090180360722e-05, + "loss": 2.2052, + "step": 50 + }, + { + "epoch": 0.07, + "eval_loss": 1.966366171836853, + "eval_runtime": 13.6092, + "eval_samples_per_second": 18.37, + "eval_steps_per_second": 2.351, + "step": 50 + }, + { + "epoch": 0.1, + "learning_rate": 2.1292585170340683e-05, + "loss": 1.81, + "step": 75 + }, + { + "epoch": 0.1, + "eval_loss": 1.8134992122650146, + "eval_runtime": 13.5125, + "eval_samples_per_second": 18.501, + "eval_steps_per_second": 2.368, + "step": 75 + }, + { + "epoch": 0.13, + "learning_rate": 2.0040080160320643e-05, + "loss": 1.705, + "step": 100 + }, + { + "epoch": 0.13, + "eval_loss": 1.7801309823989868, + "eval_runtime": 13.4868, + "eval_samples_per_second": 18.537, + "eval_steps_per_second": 2.373, + "step": 100 + }, + { + "epoch": 0.17, + "learning_rate": 1.87875751503006e-05, + "loss": 1.6288, + "step": 125 + }, + { + "epoch": 0.17, + "eval_loss": 1.7698311805725098, + "eval_runtime": 13.6256, + "eval_samples_per_second": 18.348, + "eval_steps_per_second": 2.349, + "step": 125 + }, + { + "epoch": 0.2, + "learning_rate": 1.7535070140280564e-05, + "loss": 1.7262, + "step": 150 + }, + { + "epoch": 0.2, + "eval_loss": 1.7579054832458496, + "eval_runtime": 13.6761, + "eval_samples_per_second": 18.28, + "eval_steps_per_second": 2.34, + "step": 150 + }, + { + "epoch": 0.23, + "learning_rate": 1.628256513026052e-05, + "loss": 1.6657, + "step": 175 + }, + { + "epoch": 0.23, + "eval_loss": 1.7578012943267822, + "eval_runtime": 13.4284, + "eval_samples_per_second": 18.617, + "eval_steps_per_second": 2.383, + "step": 175 + }, + { + "epoch": 0.27, + "learning_rate": 1.5030060120240483e-05, + "loss": 1.6197, + "step": 200 + }, + { + "epoch": 0.27, + "eval_loss": 1.7506276369094849, + "eval_runtime": 13.5018, + "eval_samples_per_second": 18.516, + "eval_steps_per_second": 2.37, + "step": 200 + }, + { + "epoch": 0.3, + "learning_rate": 1.3777555110220442e-05, + "loss": 1.7643, + "step": 225 + }, + { + "epoch": 0.3, + "eval_loss": 1.745801568031311, + "eval_runtime": 13.5003, + "eval_samples_per_second": 18.518, + "eval_steps_per_second": 2.37, + "step": 225 + } + ], + "logging_steps": 25, + "max_steps": 500, + "num_input_tokens_seen": 0, + "num_train_epochs": 1, + "save_steps": 25, + "total_flos": 3697679794176000.0, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +} diff --git a/checkpoint-225/training_args.bin b/checkpoint-225/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..325e70156f50a1004dd17f03c6a72eae34ab817a --- /dev/null +++ b/checkpoint-225/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f539f4dad423afd20fdebce05e8744780e9ed30b55cfb57c0e5421e453ed0ea1 +size 4728 diff --git a/checkpoint-25/README.md b/checkpoint-25/README.md new file mode 100644 index 
0000000000000000000000000000000000000000..de605040afe981efd7347112a2bd3d9ddfb19684 --- /dev/null +++ b/checkpoint-25/README.md @@ -0,0 +1,204 @@ +--- +library_name: peft +base_model: microsoft/phi-2 +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. + +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
+ +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] + + +### Framework versions + +- PEFT 0.7.2.dev0 \ No newline at end of file diff --git a/checkpoint-25/adapter_config.json b/checkpoint-25/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2db55fa42720f3e778398fabefd1c209426aaf4 --- /dev/null +++ b/checkpoint-25/adapter_config.json @@ -0,0 +1,28 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "microsoft/phi-2", + "bias": "none", + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 64, + "lora_dropout": 0.05, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 32, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "fc1", + "fc2", + "Wqkv" + ], + "task_type": "CAUSAL_LM", + "use_rslora": false +} \ No newline at end of file diff --git a/checkpoint-25/adapter_model.safetensors b/checkpoint-25/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..a31046871b06c0dc65270fc8f997bb850d29d733 --- /dev/null +++ b/checkpoint-25/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c82e65773effe406f64f32ac8978d3f5203cb89b09a1e6ef54af0f1e85c50377 +size 104873776 diff --git a/checkpoint-25/optimizer.pt b/checkpoint-25/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..efd38922d48c17415050f2d2b7eed4ff77f1633d --- /dev/null +++ b/checkpoint-25/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c28dd0e234267f8dfaef563d43095caa82e8f3aba495d22a110accf1ce82846 +size 52680378 diff --git a/checkpoint-25/rng_state.pth b/checkpoint-25/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..34cbf2e5dae0e0fa203f35c1eed4196b1156eb1d --- /dev/null +++ b/checkpoint-25/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f7ab6b5fb768b77da9cd777e06eb0059fbbe9e604445918c039f6cc51f7423c +size 14244 diff --git a/checkpoint-25/scheduler.pt b/checkpoint-25/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..fb841b21317f07ef34c8a70c2a54c60e29b3d5c6 --- /dev/null +++ b/checkpoint-25/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb0613a524b069ccdfb886504674d1558c1558c2a96f546d9b07ab63c397c1eb +size 1064 diff --git a/checkpoint-25/trainer_state.json b/checkpoint-25/trainer_state.json new file mode 100644 index 
0000000000000000000000000000000000000000..855f9dd9b21dddc65e53f9ac36d8003ad75e1799 --- /dev/null +++ b/checkpoint-25/trainer_state.json @@ -0,0 +1,35 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.03333333333333333, + "eval_steps": 25, + "global_step": 25, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.03, + "learning_rate": 2.3797595190380762e-05, + "loss": 3.5104, + "step": 25 + }, + { + "epoch": 0.03, + "eval_loss": 2.643505334854126, + "eval_runtime": 13.8992, + "eval_samples_per_second": 17.987, + "eval_steps_per_second": 2.302, + "step": 25 + } + ], + "logging_steps": 25, + "max_steps": 500, + "num_input_tokens_seen": 0, + "num_train_epochs": 1, + "save_steps": 25, + "total_flos": 410853310464000.0, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +} diff --git a/checkpoint-25/training_args.bin b/checkpoint-25/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..325e70156f50a1004dd17f03c6a72eae34ab817a --- /dev/null +++ b/checkpoint-25/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f539f4dad423afd20fdebce05e8744780e9ed30b55cfb57c0e5421e453ed0ea1 +size 4728 diff --git a/checkpoint-250/README.md b/checkpoint-250/README.md new file mode 100644 index 0000000000000000000000000000000000000000..de605040afe981efd7347112a2bd3d9ddfb19684 --- /dev/null +++ b/checkpoint-250/README.md @@ -0,0 +1,204 @@ +--- +library_name: peft +base_model: microsoft/phi-2 +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. 
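
The card itself leaves this section as a placeholder, so the snippet below is only a minimal sketch, not the authors' published usage code. It assumes the adapter is loaded from one of the local checkpoint directories added in this diff (here `checkpoint-250`, which contains `adapter_config.json` and `adapter_model.safetensors`) on top of the `microsoft/phi-2` base model via PEFT. The dtype, the `trust_remote_code` flag, and the `Instruct:/Output:` prompt format are assumptions; note that the adapter targets a module named `Wqkv`, which matches the remote-code phi-2 implementation that was current for PEFT 0.7.x and may not match newer transformers-native Phi module names.

```python
# Minimal sketch (assumptions noted above): load a LoRA checkpoint from this
# repo onto microsoft/phi-2 and run a short generation.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "microsoft/phi-2",
    torch_dtype=torch.float16,   # assumption: half precision to fit a single GPU
    trust_remote_code=True,      # assumption: the era-specific phi-2 custom code (Wqkv/fc1/fc2 names)
)
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True)

# "checkpoint-250" is one of the checkpoint directories committed in this diff.
model = PeftModel.from_pretrained(base, "checkpoint-250")
model.eval()

prompt = "Instruct: What does a LoRA adapter change in a base model?\nOutput:"  # prompt format is an assumption
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```
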
+ +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). + +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] + + +### Framework versions + +- PEFT 0.7.2.dev0 \ No newline at end of file diff --git a/checkpoint-250/adapter_config.json b/checkpoint-250/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2db55fa42720f3e778398fabefd1c209426aaf4 --- /dev/null +++ b/checkpoint-250/adapter_config.json @@ -0,0 +1,28 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "microsoft/phi-2", + "bias": "none", + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 64, + "lora_dropout": 0.05, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 32, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "fc1", + "fc2", + "Wqkv" + ], + "task_type": "CAUSAL_LM", + "use_rslora": false +} \ No newline at end of file diff --git a/checkpoint-250/adapter_model.safetensors b/checkpoint-250/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..64c73ee5b91d2c37be241168dde999489b6b955a --- /dev/null +++ b/checkpoint-250/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59017988014e00ddf367e59afbd540a35ee54d9399d4f795e2ba6bed0ba098d7 +size 104873776 diff --git a/checkpoint-250/optimizer.pt b/checkpoint-250/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..958498c4aabc8725a949aac028530d1d1dc1250b --- /dev/null +++ b/checkpoint-250/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c4fda8ab6a73d801f36c378680f384500490f4de93efca32562a81872c15c25 +size 
52680378 diff --git a/checkpoint-250/rng_state.pth b/checkpoint-250/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..3b64269195bab0f8221f52ec724aabafb3e807a3 --- /dev/null +++ b/checkpoint-250/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:047e80e71b677d2cec7c524aea4c4f8ad4acc212b17ce239334642cee8f15fab +size 14244 diff --git a/checkpoint-250/scheduler.pt b/checkpoint-250/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..f7dba319720c7830ff996729858e78c11189a5a5 --- /dev/null +++ b/checkpoint-250/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6ab603862849e591ef87edf8631a784fdb9b2b7e6ce2b5df26a05c59fbf8b86 +size 1064 diff --git a/checkpoint-250/trainer_state.json b/checkpoint-250/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..833bcf689d557b24c722fb0f002ea843812d6ee9 --- /dev/null +++ b/checkpoint-250/trainer_state.json @@ -0,0 +1,161 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.3333333333333333, + "eval_steps": 25, + "global_step": 250, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.03, + "learning_rate": 2.3797595190380762e-05, + "loss": 3.5104, + "step": 25 + }, + { + "epoch": 0.03, + "eval_loss": 2.643505334854126, + "eval_runtime": 13.8992, + "eval_samples_per_second": 17.987, + "eval_steps_per_second": 2.302, + "step": 25 + }, + { + "epoch": 0.07, + "learning_rate": 2.2545090180360722e-05, + "loss": 2.2052, + "step": 50 + }, + { + "epoch": 0.07, + "eval_loss": 1.966366171836853, + "eval_runtime": 13.6092, + "eval_samples_per_second": 18.37, + "eval_steps_per_second": 2.351, + "step": 50 + }, + { + "epoch": 0.1, + "learning_rate": 2.1292585170340683e-05, + "loss": 1.81, + "step": 75 + }, + { + "epoch": 0.1, + "eval_loss": 1.8134992122650146, + "eval_runtime": 13.5125, + "eval_samples_per_second": 18.501, + "eval_steps_per_second": 2.368, + "step": 75 + }, + { + "epoch": 0.13, + "learning_rate": 2.0040080160320643e-05, + "loss": 1.705, + "step": 100 + }, + { + "epoch": 0.13, + "eval_loss": 1.7801309823989868, + "eval_runtime": 13.4868, + "eval_samples_per_second": 18.537, + "eval_steps_per_second": 2.373, + "step": 100 + }, + { + "epoch": 0.17, + "learning_rate": 1.87875751503006e-05, + "loss": 1.6288, + "step": 125 + }, + { + "epoch": 0.17, + "eval_loss": 1.7698311805725098, + "eval_runtime": 13.6256, + "eval_samples_per_second": 18.348, + "eval_steps_per_second": 2.349, + "step": 125 + }, + { + "epoch": 0.2, + "learning_rate": 1.7535070140280564e-05, + "loss": 1.7262, + "step": 150 + }, + { + "epoch": 0.2, + "eval_loss": 1.7579054832458496, + "eval_runtime": 13.6761, + "eval_samples_per_second": 18.28, + "eval_steps_per_second": 2.34, + "step": 150 + }, + { + "epoch": 0.23, + "learning_rate": 1.628256513026052e-05, + "loss": 1.6657, + "step": 175 + }, + { + "epoch": 0.23, + "eval_loss": 1.7578012943267822, + "eval_runtime": 13.4284, + "eval_samples_per_second": 18.617, + "eval_steps_per_second": 2.383, + "step": 175 + }, + { + "epoch": 0.27, + "learning_rate": 1.5030060120240483e-05, + "loss": 1.6197, + "step": 200 + }, + { + "epoch": 0.27, + "eval_loss": 1.7506276369094849, + "eval_runtime": 13.5018, + "eval_samples_per_second": 18.516, + "eval_steps_per_second": 2.37, + "step": 200 + }, + { + "epoch": 0.3, + "learning_rate": 1.3777555110220442e-05, + "loss": 1.7643, + "step": 225 + }, + 
{ + "epoch": 0.3, + "eval_loss": 1.745801568031311, + "eval_runtime": 13.5003, + "eval_samples_per_second": 18.518, + "eval_steps_per_second": 2.37, + "step": 225 + }, + { + "epoch": 0.33, + "learning_rate": 1.25250501002004e-05, + "loss": 1.6707, + "step": 250 + }, + { + "epoch": 0.33, + "eval_loss": 1.7450388669967651, + "eval_runtime": 13.3223, + "eval_samples_per_second": 18.765, + "eval_steps_per_second": 2.402, + "step": 250 + } + ], + "logging_steps": 25, + "max_steps": 500, + "num_input_tokens_seen": 0, + "num_train_epochs": 1, + "save_steps": 25, + "total_flos": 4108533104640000.0, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +} diff --git a/checkpoint-250/training_args.bin b/checkpoint-250/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..325e70156f50a1004dd17f03c6a72eae34ab817a --- /dev/null +++ b/checkpoint-250/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f539f4dad423afd20fdebce05e8744780e9ed30b55cfb57c0e5421e453ed0ea1 +size 4728 diff --git a/checkpoint-275/README.md b/checkpoint-275/README.md new file mode 100644 index 0000000000000000000000000000000000000000..de605040afe981efd7347112a2bd3d9ddfb19684 --- /dev/null +++ b/checkpoint-275/README.md @@ -0,0 +1,204 @@ +--- +library_name: peft +base_model: microsoft/phi-2 +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. + +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
+ +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] + + +### Framework versions + +- PEFT 0.7.2.dev0 \ No newline at end of file diff --git a/checkpoint-275/adapter_config.json b/checkpoint-275/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2db55fa42720f3e778398fabefd1c209426aaf4 --- /dev/null +++ b/checkpoint-275/adapter_config.json @@ -0,0 +1,28 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "microsoft/phi-2", + "bias": "none", + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 64, + "lora_dropout": 0.05, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 32, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "fc1", + "fc2", + "Wqkv" + ], + "task_type": "CAUSAL_LM", + "use_rslora": false +} \ No newline at end of file diff --git a/checkpoint-275/adapter_model.safetensors b/checkpoint-275/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..05f387d2de5346313b7646fe33cc4c9fe18856d1 --- /dev/null +++ b/checkpoint-275/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6461effddb9118151b5d3e6d2cb73eb07d5495f04e6b76647edaa00679546ddd +size 104873776 diff --git a/checkpoint-275/optimizer.pt b/checkpoint-275/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..69b2a52370a4454bec5cb2159921c8322969f5e5 --- /dev/null +++ b/checkpoint-275/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20a18e381cb1960ab43d09cda96f2d184380f2b95798199562694a434bc1a94a +size 52680506 diff --git a/checkpoint-275/rng_state.pth b/checkpoint-275/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..adca888a011c734fef8b93f31631d763c79874f9 --- /dev/null +++ b/checkpoint-275/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1f5bdd74388dd3ad2228dfd21d3d1c332664ecddc852a5e3bfaaa730408512a +size 14244 diff --git a/checkpoint-275/scheduler.pt b/checkpoint-275/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..e7624de6e2dc3393ed5dcd3e92a06d1b1a13da77 --- /dev/null +++ b/checkpoint-275/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e38749d83fc310b25daafdc653ce654325ff528dbbec44aac53a8858f9fdc6b +size 1064 diff --git a/checkpoint-275/trainer_state.json b/checkpoint-275/trainer_state.json new file mode 100644 index 
0000000000000000000000000000000000000000..5a345f6192f153ae93726272fc54ca7184f78ccf --- /dev/null +++ b/checkpoint-275/trainer_state.json @@ -0,0 +1,175 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.36666666666666664, + "eval_steps": 25, + "global_step": 275, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.03, + "learning_rate": 2.3797595190380762e-05, + "loss": 3.5104, + "step": 25 + }, + { + "epoch": 0.03, + "eval_loss": 2.643505334854126, + "eval_runtime": 13.8992, + "eval_samples_per_second": 17.987, + "eval_steps_per_second": 2.302, + "step": 25 + }, + { + "epoch": 0.07, + "learning_rate": 2.2545090180360722e-05, + "loss": 2.2052, + "step": 50 + }, + { + "epoch": 0.07, + "eval_loss": 1.966366171836853, + "eval_runtime": 13.6092, + "eval_samples_per_second": 18.37, + "eval_steps_per_second": 2.351, + "step": 50 + }, + { + "epoch": 0.1, + "learning_rate": 2.1292585170340683e-05, + "loss": 1.81, + "step": 75 + }, + { + "epoch": 0.1, + "eval_loss": 1.8134992122650146, + "eval_runtime": 13.5125, + "eval_samples_per_second": 18.501, + "eval_steps_per_second": 2.368, + "step": 75 + }, + { + "epoch": 0.13, + "learning_rate": 2.0040080160320643e-05, + "loss": 1.705, + "step": 100 + }, + { + "epoch": 0.13, + "eval_loss": 1.7801309823989868, + "eval_runtime": 13.4868, + "eval_samples_per_second": 18.537, + "eval_steps_per_second": 2.373, + "step": 100 + }, + { + "epoch": 0.17, + "learning_rate": 1.87875751503006e-05, + "loss": 1.6288, + "step": 125 + }, + { + "epoch": 0.17, + "eval_loss": 1.7698311805725098, + "eval_runtime": 13.6256, + "eval_samples_per_second": 18.348, + "eval_steps_per_second": 2.349, + "step": 125 + }, + { + "epoch": 0.2, + "learning_rate": 1.7535070140280564e-05, + "loss": 1.7262, + "step": 150 + }, + { + "epoch": 0.2, + "eval_loss": 1.7579054832458496, + "eval_runtime": 13.6761, + "eval_samples_per_second": 18.28, + "eval_steps_per_second": 2.34, + "step": 150 + }, + { + "epoch": 0.23, + "learning_rate": 1.628256513026052e-05, + "loss": 1.6657, + "step": 175 + }, + { + "epoch": 0.23, + "eval_loss": 1.7578012943267822, + "eval_runtime": 13.4284, + "eval_samples_per_second": 18.617, + "eval_steps_per_second": 2.383, + "step": 175 + }, + { + "epoch": 0.27, + "learning_rate": 1.5030060120240483e-05, + "loss": 1.6197, + "step": 200 + }, + { + "epoch": 0.27, + "eval_loss": 1.7506276369094849, + "eval_runtime": 13.5018, + "eval_samples_per_second": 18.516, + "eval_steps_per_second": 2.37, + "step": 200 + }, + { + "epoch": 0.3, + "learning_rate": 1.3777555110220442e-05, + "loss": 1.7643, + "step": 225 + }, + { + "epoch": 0.3, + "eval_loss": 1.745801568031311, + "eval_runtime": 13.5003, + "eval_samples_per_second": 18.518, + "eval_steps_per_second": 2.37, + "step": 225 + }, + { + "epoch": 0.33, + "learning_rate": 1.25250501002004e-05, + "loss": 1.6707, + "step": 250 + }, + { + "epoch": 0.33, + "eval_loss": 1.7450388669967651, + "eval_runtime": 13.3223, + "eval_samples_per_second": 18.765, + "eval_steps_per_second": 2.402, + "step": 250 + }, + { + "epoch": 0.37, + "learning_rate": 1.1272545090180361e-05, + "loss": 1.6439, + "step": 275 + }, + { + "epoch": 0.37, + "eval_loss": 1.7427024841308594, + "eval_runtime": 13.3162, + "eval_samples_per_second": 18.774, + "eval_steps_per_second": 2.403, + "step": 275 + } + ], + "logging_steps": 25, + "max_steps": 500, + "num_input_tokens_seen": 0, + "num_train_epochs": 1, + "save_steps": 25, + "total_flos": 4519386415104000.0, + 
"train_batch_size": 2, + "trial_name": null, + "trial_params": null +} diff --git a/checkpoint-275/training_args.bin b/checkpoint-275/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..325e70156f50a1004dd17f03c6a72eae34ab817a --- /dev/null +++ b/checkpoint-275/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f539f4dad423afd20fdebce05e8744780e9ed30b55cfb57c0e5421e453ed0ea1 +size 4728 diff --git a/checkpoint-300/README.md b/checkpoint-300/README.md new file mode 100644 index 0000000000000000000000000000000000000000..de605040afe981efd7347112a2bd3d9ddfb19684 --- /dev/null +++ b/checkpoint-300/README.md @@ -0,0 +1,204 @@ +--- +library_name: peft +base_model: microsoft/phi-2 +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. + +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
+ +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] + + +### Framework versions + +- PEFT 0.7.2.dev0 \ No newline at end of file diff --git a/checkpoint-300/adapter_config.json b/checkpoint-300/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2db55fa42720f3e778398fabefd1c209426aaf4 --- /dev/null +++ b/checkpoint-300/adapter_config.json @@ -0,0 +1,28 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "microsoft/phi-2", + "bias": "none", + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 64, + "lora_dropout": 0.05, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 32, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "fc1", + "fc2", + "Wqkv" + ], + "task_type": "CAUSAL_LM", + "use_rslora": false +} \ No newline at end of file diff --git a/checkpoint-300/adapter_model.safetensors b/checkpoint-300/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..e7f1dfbbe9e46b99857c2107c03e63810fca94e3 --- /dev/null +++ b/checkpoint-300/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b47a247d507b84d6037b92a8a2e62919cd8485e659648795c30a9a5c5b35e24 +size 104873776 diff --git a/checkpoint-300/optimizer.pt b/checkpoint-300/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..3f77f41616e0609d047b0f47743a9e51ac8e4b63 --- /dev/null +++ b/checkpoint-300/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33f7c03f18c6b3d7989e38c7566cee8dec68475c0302e3ccde741509305454b4 +size 52680506 diff --git a/checkpoint-300/rng_state.pth b/checkpoint-300/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..6f1e74d9a4ed8895f059d6955a0e4f8ec2d2abd4 --- /dev/null +++ b/checkpoint-300/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13aacd80534b7b21604e901162c9b94350d8b291b0efdc9b03cf29365967c1a1 +size 14244 diff --git a/checkpoint-300/scheduler.pt b/checkpoint-300/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..6752327712c990f73c883b22464dd121f57d8c64 --- /dev/null +++ b/checkpoint-300/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:686cb87e10efdd3e8578989d603552819367659cd31d37f06e080e0a0b7c7e8c +size 1064 diff --git a/checkpoint-300/trainer_state.json b/checkpoint-300/trainer_state.json new file mode 100644 index 
0000000000000000000000000000000000000000..5e4ec2ba3e79c77f8ba5e2d0b1ca516f5d3abed7 --- /dev/null +++ b/checkpoint-300/trainer_state.json @@ -0,0 +1,189 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.4, + "eval_steps": 25, + "global_step": 300, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.03, + "learning_rate": 2.3797595190380762e-05, + "loss": 3.5104, + "step": 25 + }, + { + "epoch": 0.03, + "eval_loss": 2.643505334854126, + "eval_runtime": 13.8992, + "eval_samples_per_second": 17.987, + "eval_steps_per_second": 2.302, + "step": 25 + }, + { + "epoch": 0.07, + "learning_rate": 2.2545090180360722e-05, + "loss": 2.2052, + "step": 50 + }, + { + "epoch": 0.07, + "eval_loss": 1.966366171836853, + "eval_runtime": 13.6092, + "eval_samples_per_second": 18.37, + "eval_steps_per_second": 2.351, + "step": 50 + }, + { + "epoch": 0.1, + "learning_rate": 2.1292585170340683e-05, + "loss": 1.81, + "step": 75 + }, + { + "epoch": 0.1, + "eval_loss": 1.8134992122650146, + "eval_runtime": 13.5125, + "eval_samples_per_second": 18.501, + "eval_steps_per_second": 2.368, + "step": 75 + }, + { + "epoch": 0.13, + "learning_rate": 2.0040080160320643e-05, + "loss": 1.705, + "step": 100 + }, + { + "epoch": 0.13, + "eval_loss": 1.7801309823989868, + "eval_runtime": 13.4868, + "eval_samples_per_second": 18.537, + "eval_steps_per_second": 2.373, + "step": 100 + }, + { + "epoch": 0.17, + "learning_rate": 1.87875751503006e-05, + "loss": 1.6288, + "step": 125 + }, + { + "epoch": 0.17, + "eval_loss": 1.7698311805725098, + "eval_runtime": 13.6256, + "eval_samples_per_second": 18.348, + "eval_steps_per_second": 2.349, + "step": 125 + }, + { + "epoch": 0.2, + "learning_rate": 1.7535070140280564e-05, + "loss": 1.7262, + "step": 150 + }, + { + "epoch": 0.2, + "eval_loss": 1.7579054832458496, + "eval_runtime": 13.6761, + "eval_samples_per_second": 18.28, + "eval_steps_per_second": 2.34, + "step": 150 + }, + { + "epoch": 0.23, + "learning_rate": 1.628256513026052e-05, + "loss": 1.6657, + "step": 175 + }, + { + "epoch": 0.23, + "eval_loss": 1.7578012943267822, + "eval_runtime": 13.4284, + "eval_samples_per_second": 18.617, + "eval_steps_per_second": 2.383, + "step": 175 + }, + { + "epoch": 0.27, + "learning_rate": 1.5030060120240483e-05, + "loss": 1.6197, + "step": 200 + }, + { + "epoch": 0.27, + "eval_loss": 1.7506276369094849, + "eval_runtime": 13.5018, + "eval_samples_per_second": 18.516, + "eval_steps_per_second": 2.37, + "step": 200 + }, + { + "epoch": 0.3, + "learning_rate": 1.3777555110220442e-05, + "loss": 1.7643, + "step": 225 + }, + { + "epoch": 0.3, + "eval_loss": 1.745801568031311, + "eval_runtime": 13.5003, + "eval_samples_per_second": 18.518, + "eval_steps_per_second": 2.37, + "step": 225 + }, + { + "epoch": 0.33, + "learning_rate": 1.25250501002004e-05, + "loss": 1.6707, + "step": 250 + }, + { + "epoch": 0.33, + "eval_loss": 1.7450388669967651, + "eval_runtime": 13.3223, + "eval_samples_per_second": 18.765, + "eval_steps_per_second": 2.402, + "step": 250 + }, + { + "epoch": 0.37, + "learning_rate": 1.1272545090180361e-05, + "loss": 1.6439, + "step": 275 + }, + { + "epoch": 0.37, + "eval_loss": 1.7427024841308594, + "eval_runtime": 13.3162, + "eval_samples_per_second": 18.774, + "eval_steps_per_second": 2.403, + "step": 275 + }, + { + "epoch": 0.4, + "learning_rate": 1.0020040080160322e-05, + "loss": 1.5697, + "step": 300 + }, + { + "epoch": 0.4, + "eval_loss": 1.7423481941223145, + "eval_runtime": 
13.6598, + "eval_samples_per_second": 18.302, + "eval_steps_per_second": 2.343, + "step": 300 + } + ], + "logging_steps": 25, + "max_steps": 500, + "num_input_tokens_seen": 0, + "num_train_epochs": 1, + "save_steps": 25, + "total_flos": 4930239725568000.0, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +} diff --git a/checkpoint-300/training_args.bin b/checkpoint-300/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..325e70156f50a1004dd17f03c6a72eae34ab817a --- /dev/null +++ b/checkpoint-300/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f539f4dad423afd20fdebce05e8744780e9ed30b55cfb57c0e5421e453ed0ea1 +size 4728 diff --git a/checkpoint-325/README.md b/checkpoint-325/README.md new file mode 100644 index 0000000000000000000000000000000000000000..de605040afe981efd7347112a2bd3d9ddfb19684 --- /dev/null +++ b/checkpoint-325/README.md @@ -0,0 +1,204 @@ +--- +library_name: peft +base_model: microsoft/phi-2 +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. + +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
+ +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] + + +### Framework versions + +- PEFT 0.7.2.dev0 \ No newline at end of file diff --git a/checkpoint-325/adapter_config.json b/checkpoint-325/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2db55fa42720f3e778398fabefd1c209426aaf4 --- /dev/null +++ b/checkpoint-325/adapter_config.json @@ -0,0 +1,28 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "microsoft/phi-2", + "bias": "none", + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 64, + "lora_dropout": 0.05, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 32, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "fc1", + "fc2", + "Wqkv" + ], + "task_type": "CAUSAL_LM", + "use_rslora": false +} \ No newline at end of file diff --git a/checkpoint-325/adapter_model.safetensors b/checkpoint-325/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..ab9c529714f075c0b0acc81e3e88cdf6a95e9483 --- /dev/null +++ b/checkpoint-325/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6a673dbfa862a6317071d7993a68cc5144d8101e7bab9ea48997728de37b205 +size 104873776 diff --git a/checkpoint-325/optimizer.pt b/checkpoint-325/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..782bf1346b4d76a71586ec902fcac73f2589aabd --- /dev/null +++ b/checkpoint-325/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df3b359944085e84a2beb67c0e18a1bf963e7a6be648a8390b9730297fec161a +size 52680506 diff --git a/checkpoint-325/rng_state.pth b/checkpoint-325/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..70a5134d6ea505bbde7a152f46cb9553fff19b1d --- /dev/null +++ b/checkpoint-325/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2920234637d3dc8d362ecfe81840dfd0511ff0d5b794fa179821765c50efd1d3 +size 14244 diff --git a/checkpoint-325/scheduler.pt b/checkpoint-325/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..008682961f0b4c669692d209179c6c85300e10df --- /dev/null +++ b/checkpoint-325/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a83711e2029f4305142b5c2d8a3438360e44bedd380ddbfed599567063fffc6 +size 1064 diff --git a/checkpoint-325/trainer_state.json b/checkpoint-325/trainer_state.json new file mode 100644 index 
0000000000000000000000000000000000000000..aa0a46389cb4974f62d97eb21ea6e53d05913181 --- /dev/null +++ b/checkpoint-325/trainer_state.json @@ -0,0 +1,203 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.43333333333333335, + "eval_steps": 25, + "global_step": 325, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.03, + "learning_rate": 2.3797595190380762e-05, + "loss": 3.5104, + "step": 25 + }, + { + "epoch": 0.03, + "eval_loss": 2.643505334854126, + "eval_runtime": 13.8992, + "eval_samples_per_second": 17.987, + "eval_steps_per_second": 2.302, + "step": 25 + }, + { + "epoch": 0.07, + "learning_rate": 2.2545090180360722e-05, + "loss": 2.2052, + "step": 50 + }, + { + "epoch": 0.07, + "eval_loss": 1.966366171836853, + "eval_runtime": 13.6092, + "eval_samples_per_second": 18.37, + "eval_steps_per_second": 2.351, + "step": 50 + }, + { + "epoch": 0.1, + "learning_rate": 2.1292585170340683e-05, + "loss": 1.81, + "step": 75 + }, + { + "epoch": 0.1, + "eval_loss": 1.8134992122650146, + "eval_runtime": 13.5125, + "eval_samples_per_second": 18.501, + "eval_steps_per_second": 2.368, + "step": 75 + }, + { + "epoch": 0.13, + "learning_rate": 2.0040080160320643e-05, + "loss": 1.705, + "step": 100 + }, + { + "epoch": 0.13, + "eval_loss": 1.7801309823989868, + "eval_runtime": 13.4868, + "eval_samples_per_second": 18.537, + "eval_steps_per_second": 2.373, + "step": 100 + }, + { + "epoch": 0.17, + "learning_rate": 1.87875751503006e-05, + "loss": 1.6288, + "step": 125 + }, + { + "epoch": 0.17, + "eval_loss": 1.7698311805725098, + "eval_runtime": 13.6256, + "eval_samples_per_second": 18.348, + "eval_steps_per_second": 2.349, + "step": 125 + }, + { + "epoch": 0.2, + "learning_rate": 1.7535070140280564e-05, + "loss": 1.7262, + "step": 150 + }, + { + "epoch": 0.2, + "eval_loss": 1.7579054832458496, + "eval_runtime": 13.6761, + "eval_samples_per_second": 18.28, + "eval_steps_per_second": 2.34, + "step": 150 + }, + { + "epoch": 0.23, + "learning_rate": 1.628256513026052e-05, + "loss": 1.6657, + "step": 175 + }, + { + "epoch": 0.23, + "eval_loss": 1.7578012943267822, + "eval_runtime": 13.4284, + "eval_samples_per_second": 18.617, + "eval_steps_per_second": 2.383, + "step": 175 + }, + { + "epoch": 0.27, + "learning_rate": 1.5030060120240483e-05, + "loss": 1.6197, + "step": 200 + }, + { + "epoch": 0.27, + "eval_loss": 1.7506276369094849, + "eval_runtime": 13.5018, + "eval_samples_per_second": 18.516, + "eval_steps_per_second": 2.37, + "step": 200 + }, + { + "epoch": 0.3, + "learning_rate": 1.3777555110220442e-05, + "loss": 1.7643, + "step": 225 + }, + { + "epoch": 0.3, + "eval_loss": 1.745801568031311, + "eval_runtime": 13.5003, + "eval_samples_per_second": 18.518, + "eval_steps_per_second": 2.37, + "step": 225 + }, + { + "epoch": 0.33, + "learning_rate": 1.25250501002004e-05, + "loss": 1.6707, + "step": 250 + }, + { + "epoch": 0.33, + "eval_loss": 1.7450388669967651, + "eval_runtime": 13.3223, + "eval_samples_per_second": 18.765, + "eval_steps_per_second": 2.402, + "step": 250 + }, + { + "epoch": 0.37, + "learning_rate": 1.1272545090180361e-05, + "loss": 1.6439, + "step": 275 + }, + { + "epoch": 0.37, + "eval_loss": 1.7427024841308594, + "eval_runtime": 13.3162, + "eval_samples_per_second": 18.774, + "eval_steps_per_second": 2.403, + "step": 275 + }, + { + "epoch": 0.4, + "learning_rate": 1.0020040080160322e-05, + "loss": 1.5697, + "step": 300 + }, + { + "epoch": 0.4, + "eval_loss": 1.7423481941223145, + 
"eval_runtime": 13.6598, + "eval_samples_per_second": 18.302, + "eval_steps_per_second": 2.343, + "step": 300 + }, + { + "epoch": 0.43, + "learning_rate": 8.767535070140282e-06, + "loss": 1.6362, + "step": 325 + }, + { + "epoch": 0.43, + "eval_loss": 1.7418254613876343, + "eval_runtime": 13.299, + "eval_samples_per_second": 18.798, + "eval_steps_per_second": 2.406, + "step": 325 + } + ], + "logging_steps": 25, + "max_steps": 500, + "num_input_tokens_seen": 0, + "num_train_epochs": 1, + "save_steps": 25, + "total_flos": 5341093036032000.0, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +} diff --git a/checkpoint-325/training_args.bin b/checkpoint-325/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..325e70156f50a1004dd17f03c6a72eae34ab817a --- /dev/null +++ b/checkpoint-325/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f539f4dad423afd20fdebce05e8744780e9ed30b55cfb57c0e5421e453ed0ea1 +size 4728 diff --git a/checkpoint-350/README.md b/checkpoint-350/README.md new file mode 100644 index 0000000000000000000000000000000000000000..de605040afe981efd7347112a2bd3d9ddfb19684 --- /dev/null +++ b/checkpoint-350/README.md @@ -0,0 +1,204 @@ +--- +library_name: peft +base_model: microsoft/phi-2 +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. + +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
+ +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] + + +### Framework versions + +- PEFT 0.7.2.dev0 \ No newline at end of file diff --git a/checkpoint-350/adapter_config.json b/checkpoint-350/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2db55fa42720f3e778398fabefd1c209426aaf4 --- /dev/null +++ b/checkpoint-350/adapter_config.json @@ -0,0 +1,28 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "microsoft/phi-2", + "bias": "none", + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 64, + "lora_dropout": 0.05, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 32, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "fc1", + "fc2", + "Wqkv" + ], + "task_type": "CAUSAL_LM", + "use_rslora": false +} \ No newline at end of file diff --git a/checkpoint-350/adapter_model.safetensors b/checkpoint-350/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..e65e5e2f8ddb9a15be844f2960c820dd83740554 --- /dev/null +++ b/checkpoint-350/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d45672bafcd3ea753d0f4b7ba45227f7d67d1700ecfd7988be8d8a94b3ee4aca +size 104873776 diff --git a/checkpoint-350/optimizer.pt b/checkpoint-350/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..84b8ab3c3a0224ac7722b43a8baaf9c73c150efd --- /dev/null +++ b/checkpoint-350/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b0ca517613584181b4037d2e0772a8f852de3763bae99eaa987f38541a3f749 +size 52680506 diff --git a/checkpoint-350/rng_state.pth b/checkpoint-350/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..913eee7e77e0d71239c099571a61871e25dd6b53 --- /dev/null +++ b/checkpoint-350/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1641c74fc1806a1eb7950b1d9636c819f7a084e1cb964339d18997b630e22bea +size 14244 diff --git a/checkpoint-350/scheduler.pt b/checkpoint-350/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..c51d1e4b4f2aabff6650d3ba9b1879ffca471b9a --- /dev/null +++ b/checkpoint-350/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94f2b3b9840cc3dd94f559e92be39e6ba60dc176aa44a51953db4ab0c0cd8fc8 +size 1064 diff --git a/checkpoint-350/trainer_state.json b/checkpoint-350/trainer_state.json new file mode 100644 index 
0000000000000000000000000000000000000000..83b6375944e91f98cc6b504880c8ad6a736c5324 --- /dev/null +++ b/checkpoint-350/trainer_state.json @@ -0,0 +1,217 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.4666666666666667, + "eval_steps": 25, + "global_step": 350, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.03, + "learning_rate": 2.3797595190380762e-05, + "loss": 3.5104, + "step": 25 + }, + { + "epoch": 0.03, + "eval_loss": 2.643505334854126, + "eval_runtime": 13.8992, + "eval_samples_per_second": 17.987, + "eval_steps_per_second": 2.302, + "step": 25 + }, + { + "epoch": 0.07, + "learning_rate": 2.2545090180360722e-05, + "loss": 2.2052, + "step": 50 + }, + { + "epoch": 0.07, + "eval_loss": 1.966366171836853, + "eval_runtime": 13.6092, + "eval_samples_per_second": 18.37, + "eval_steps_per_second": 2.351, + "step": 50 + }, + { + "epoch": 0.1, + "learning_rate": 2.1292585170340683e-05, + "loss": 1.81, + "step": 75 + }, + { + "epoch": 0.1, + "eval_loss": 1.8134992122650146, + "eval_runtime": 13.5125, + "eval_samples_per_second": 18.501, + "eval_steps_per_second": 2.368, + "step": 75 + }, + { + "epoch": 0.13, + "learning_rate": 2.0040080160320643e-05, + "loss": 1.705, + "step": 100 + }, + { + "epoch": 0.13, + "eval_loss": 1.7801309823989868, + "eval_runtime": 13.4868, + "eval_samples_per_second": 18.537, + "eval_steps_per_second": 2.373, + "step": 100 + }, + { + "epoch": 0.17, + "learning_rate": 1.87875751503006e-05, + "loss": 1.6288, + "step": 125 + }, + { + "epoch": 0.17, + "eval_loss": 1.7698311805725098, + "eval_runtime": 13.6256, + "eval_samples_per_second": 18.348, + "eval_steps_per_second": 2.349, + "step": 125 + }, + { + "epoch": 0.2, + "learning_rate": 1.7535070140280564e-05, + "loss": 1.7262, + "step": 150 + }, + { + "epoch": 0.2, + "eval_loss": 1.7579054832458496, + "eval_runtime": 13.6761, + "eval_samples_per_second": 18.28, + "eval_steps_per_second": 2.34, + "step": 150 + }, + { + "epoch": 0.23, + "learning_rate": 1.628256513026052e-05, + "loss": 1.6657, + "step": 175 + }, + { + "epoch": 0.23, + "eval_loss": 1.7578012943267822, + "eval_runtime": 13.4284, + "eval_samples_per_second": 18.617, + "eval_steps_per_second": 2.383, + "step": 175 + }, + { + "epoch": 0.27, + "learning_rate": 1.5030060120240483e-05, + "loss": 1.6197, + "step": 200 + }, + { + "epoch": 0.27, + "eval_loss": 1.7506276369094849, + "eval_runtime": 13.5018, + "eval_samples_per_second": 18.516, + "eval_steps_per_second": 2.37, + "step": 200 + }, + { + "epoch": 0.3, + "learning_rate": 1.3777555110220442e-05, + "loss": 1.7643, + "step": 225 + }, + { + "epoch": 0.3, + "eval_loss": 1.745801568031311, + "eval_runtime": 13.5003, + "eval_samples_per_second": 18.518, + "eval_steps_per_second": 2.37, + "step": 225 + }, + { + "epoch": 0.33, + "learning_rate": 1.25250501002004e-05, + "loss": 1.6707, + "step": 250 + }, + { + "epoch": 0.33, + "eval_loss": 1.7450388669967651, + "eval_runtime": 13.3223, + "eval_samples_per_second": 18.765, + "eval_steps_per_second": 2.402, + "step": 250 + }, + { + "epoch": 0.37, + "learning_rate": 1.1272545090180361e-05, + "loss": 1.6439, + "step": 275 + }, + { + "epoch": 0.37, + "eval_loss": 1.7427024841308594, + "eval_runtime": 13.3162, + "eval_samples_per_second": 18.774, + "eval_steps_per_second": 2.403, + "step": 275 + }, + { + "epoch": 0.4, + "learning_rate": 1.0020040080160322e-05, + "loss": 1.5697, + "step": 300 + }, + { + "epoch": 0.4, + "eval_loss": 1.7423481941223145, + 
"eval_runtime": 13.6598, + "eval_samples_per_second": 18.302, + "eval_steps_per_second": 2.343, + "step": 300 + }, + { + "epoch": 0.43, + "learning_rate": 8.767535070140282e-06, + "loss": 1.6362, + "step": 325 + }, + { + "epoch": 0.43, + "eval_loss": 1.7418254613876343, + "eval_runtime": 13.299, + "eval_samples_per_second": 18.798, + "eval_steps_per_second": 2.406, + "step": 325 + }, + { + "epoch": 0.47, + "learning_rate": 7.515030060120242e-06, + "loss": 1.607, + "step": 350 + }, + { + "epoch": 0.47, + "eval_loss": 1.738572120666504, + "eval_runtime": 13.7472, + "eval_samples_per_second": 18.186, + "eval_steps_per_second": 2.328, + "step": 350 + } + ], + "logging_steps": 25, + "max_steps": 500, + "num_input_tokens_seen": 0, + "num_train_epochs": 1, + "save_steps": 25, + "total_flos": 5751946346496000.0, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +} diff --git a/checkpoint-350/training_args.bin b/checkpoint-350/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..325e70156f50a1004dd17f03c6a72eae34ab817a --- /dev/null +++ b/checkpoint-350/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f539f4dad423afd20fdebce05e8744780e9ed30b55cfb57c0e5421e453ed0ea1 +size 4728 diff --git a/checkpoint-375/README.md b/checkpoint-375/README.md new file mode 100644 index 0000000000000000000000000000000000000000..de605040afe981efd7347112a2bd3d9ddfb19684 --- /dev/null +++ b/checkpoint-375/README.md @@ -0,0 +1,204 @@ +--- +library_name: peft +base_model: microsoft/phi-2 +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. 
+ +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). + +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] + + +### Framework versions + +- PEFT 0.7.2.dev0 \ No newline at end of file diff --git a/checkpoint-375/adapter_config.json b/checkpoint-375/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2db55fa42720f3e778398fabefd1c209426aaf4 --- /dev/null +++ b/checkpoint-375/adapter_config.json @@ -0,0 +1,28 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "microsoft/phi-2", + "bias": "none", + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 64, + "lora_dropout": 0.05, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 32, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "fc1", + "fc2", + "Wqkv" + ], + "task_type": "CAUSAL_LM", + "use_rslora": false +} \ No newline at end of file diff --git a/checkpoint-375/adapter_model.safetensors b/checkpoint-375/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..078c889b4a97ac78636744becef3297f4126ab93 --- /dev/null +++ b/checkpoint-375/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53cea0890c0ae7415710c1c44de123427d98a8b7f2a3c17e0512ad8d4c661ab2 +size 104873776 diff --git a/checkpoint-375/optimizer.pt b/checkpoint-375/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..a97c7d1c1b3434df41b1d50cc132b718a723a721 --- /dev/null +++ b/checkpoint-375/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b8c1517724d82e77ea8068eb4a5b9f4fd96cae226e2303e84cc5cee2fde7bbe +size 
52680506 diff --git a/checkpoint-375/rng_state.pth b/checkpoint-375/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..0918984846b02d6bd612975153aa3ddbdff35a0f --- /dev/null +++ b/checkpoint-375/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f97fee03057c1512eabc366c546ed884c4a001d6b0ced2d74774389d0b32f3f4 +size 14244 diff --git a/checkpoint-375/scheduler.pt b/checkpoint-375/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..5bd750a0d8b08bd449bff9f9719adc245fcde3b6 --- /dev/null +++ b/checkpoint-375/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32e6b33ef13e107d0d32662ad56a47da6d8bda2a85304772bd98c6ade48fab9d +size 1064 diff --git a/checkpoint-375/trainer_state.json b/checkpoint-375/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..139798c9951ce508423aa7dd920257806ad79129 --- /dev/null +++ b/checkpoint-375/trainer_state.json @@ -0,0 +1,231 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.5, + "eval_steps": 25, + "global_step": 375, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.03, + "learning_rate": 2.3797595190380762e-05, + "loss": 3.5104, + "step": 25 + }, + { + "epoch": 0.03, + "eval_loss": 2.643505334854126, + "eval_runtime": 13.8992, + "eval_samples_per_second": 17.987, + "eval_steps_per_second": 2.302, + "step": 25 + }, + { + "epoch": 0.07, + "learning_rate": 2.2545090180360722e-05, + "loss": 2.2052, + "step": 50 + }, + { + "epoch": 0.07, + "eval_loss": 1.966366171836853, + "eval_runtime": 13.6092, + "eval_samples_per_second": 18.37, + "eval_steps_per_second": 2.351, + "step": 50 + }, + { + "epoch": 0.1, + "learning_rate": 2.1292585170340683e-05, + "loss": 1.81, + "step": 75 + }, + { + "epoch": 0.1, + "eval_loss": 1.8134992122650146, + "eval_runtime": 13.5125, + "eval_samples_per_second": 18.501, + "eval_steps_per_second": 2.368, + "step": 75 + }, + { + "epoch": 0.13, + "learning_rate": 2.0040080160320643e-05, + "loss": 1.705, + "step": 100 + }, + { + "epoch": 0.13, + "eval_loss": 1.7801309823989868, + "eval_runtime": 13.4868, + "eval_samples_per_second": 18.537, + "eval_steps_per_second": 2.373, + "step": 100 + }, + { + "epoch": 0.17, + "learning_rate": 1.87875751503006e-05, + "loss": 1.6288, + "step": 125 + }, + { + "epoch": 0.17, + "eval_loss": 1.7698311805725098, + "eval_runtime": 13.6256, + "eval_samples_per_second": 18.348, + "eval_steps_per_second": 2.349, + "step": 125 + }, + { + "epoch": 0.2, + "learning_rate": 1.7535070140280564e-05, + "loss": 1.7262, + "step": 150 + }, + { + "epoch": 0.2, + "eval_loss": 1.7579054832458496, + "eval_runtime": 13.6761, + "eval_samples_per_second": 18.28, + "eval_steps_per_second": 2.34, + "step": 150 + }, + { + "epoch": 0.23, + "learning_rate": 1.628256513026052e-05, + "loss": 1.6657, + "step": 175 + }, + { + "epoch": 0.23, + "eval_loss": 1.7578012943267822, + "eval_runtime": 13.4284, + "eval_samples_per_second": 18.617, + "eval_steps_per_second": 2.383, + "step": 175 + }, + { + "epoch": 0.27, + "learning_rate": 1.5030060120240483e-05, + "loss": 1.6197, + "step": 200 + }, + { + "epoch": 0.27, + "eval_loss": 1.7506276369094849, + "eval_runtime": 13.5018, + "eval_samples_per_second": 18.516, + "eval_steps_per_second": 2.37, + "step": 200 + }, + { + "epoch": 0.3, + "learning_rate": 1.3777555110220442e-05, + "loss": 1.7643, + "step": 225 + }, + { + "epoch": 
0.3, + "eval_loss": 1.745801568031311, + "eval_runtime": 13.5003, + "eval_samples_per_second": 18.518, + "eval_steps_per_second": 2.37, + "step": 225 + }, + { + "epoch": 0.33, + "learning_rate": 1.25250501002004e-05, + "loss": 1.6707, + "step": 250 + }, + { + "epoch": 0.33, + "eval_loss": 1.7450388669967651, + "eval_runtime": 13.3223, + "eval_samples_per_second": 18.765, + "eval_steps_per_second": 2.402, + "step": 250 + }, + { + "epoch": 0.37, + "learning_rate": 1.1272545090180361e-05, + "loss": 1.6439, + "step": 275 + }, + { + "epoch": 0.37, + "eval_loss": 1.7427024841308594, + "eval_runtime": 13.3162, + "eval_samples_per_second": 18.774, + "eval_steps_per_second": 2.403, + "step": 275 + }, + { + "epoch": 0.4, + "learning_rate": 1.0020040080160322e-05, + "loss": 1.5697, + "step": 300 + }, + { + "epoch": 0.4, + "eval_loss": 1.7423481941223145, + "eval_runtime": 13.6598, + "eval_samples_per_second": 18.302, + "eval_steps_per_second": 2.343, + "step": 300 + }, + { + "epoch": 0.43, + "learning_rate": 8.767535070140282e-06, + "loss": 1.6362, + "step": 325 + }, + { + "epoch": 0.43, + "eval_loss": 1.7418254613876343, + "eval_runtime": 13.299, + "eval_samples_per_second": 18.798, + "eval_steps_per_second": 2.406, + "step": 325 + }, + { + "epoch": 0.47, + "learning_rate": 7.515030060120242e-06, + "loss": 1.607, + "step": 350 + }, + { + "epoch": 0.47, + "eval_loss": 1.738572120666504, + "eval_runtime": 13.7472, + "eval_samples_per_second": 18.186, + "eval_steps_per_second": 2.328, + "step": 350 + }, + { + "epoch": 0.5, + "learning_rate": 6.2625250501002e-06, + "loss": 1.5168, + "step": 375 + }, + { + "epoch": 0.5, + "eval_loss": 1.7354249954223633, + "eval_runtime": 13.5456, + "eval_samples_per_second": 18.456, + "eval_steps_per_second": 2.362, + "step": 375 + } + ], + "logging_steps": 25, + "max_steps": 500, + "num_input_tokens_seen": 0, + "num_train_epochs": 1, + "save_steps": 25, + "total_flos": 6162799656960000.0, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +} diff --git a/checkpoint-375/training_args.bin b/checkpoint-375/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..325e70156f50a1004dd17f03c6a72eae34ab817a --- /dev/null +++ b/checkpoint-375/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f539f4dad423afd20fdebce05e8744780e9ed30b55cfb57c0e5421e453ed0ea1 +size 4728 diff --git a/checkpoint-400/README.md b/checkpoint-400/README.md new file mode 100644 index 0000000000000000000000000000000000000000..de605040afe981efd7347112a2bd3d9ddfb19684 --- /dev/null +++ b/checkpoint-400/README.md @@ -0,0 +1,204 @@ +--- +library_name: peft +base_model: microsoft/phi-2 +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and 
Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. + +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). + +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] + + +### Framework versions + +- PEFT 0.7.2.dev0 \ No newline at end of file diff --git a/checkpoint-400/adapter_config.json b/checkpoint-400/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2db55fa42720f3e778398fabefd1c209426aaf4 --- /dev/null +++ b/checkpoint-400/adapter_config.json @@ -0,0 +1,28 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "microsoft/phi-2", + "bias": "none", + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 64, + "lora_dropout": 0.05, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 32, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "fc1", + "fc2", + "Wqkv" + ], + "task_type": "CAUSAL_LM", + "use_rslora": false +} \ No newline at end of file diff --git a/checkpoint-400/adapter_model.safetensors b/checkpoint-400/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..47c86843138eeb7c22761d89afcebeb38885c49d --- /dev/null +++ b/checkpoint-400/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28dd5d1d954e754d38a3bd0d03376b4f4ec687dadc41611d6bb11150e3f1ba74 +size 104873776 diff --git a/checkpoint-400/optimizer.pt 
b/checkpoint-400/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..43576df29065d29097adfda3a21b5278f1e6c83b --- /dev/null +++ b/checkpoint-400/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fcb3886d2f0c45ba859b7bacd4d660a67e9f1d66e2ba0a9361490b05f669743 +size 52680506 diff --git a/checkpoint-400/rng_state.pth b/checkpoint-400/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..ceae686fdeec7fcfe501b91191296722afc2c8f7 --- /dev/null +++ b/checkpoint-400/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2031e1e3595cf20b5a8f7107644aeb41da99c223b13fb21687c89dea487e5cda +size 14244 diff --git a/checkpoint-400/scheduler.pt b/checkpoint-400/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..65dfdec4a60932a500553575a1efe9374de83d60 --- /dev/null +++ b/checkpoint-400/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:787348458891c3fbf4b1c661d56b1b0a2fcac35199811a8f13abee6c7ea71dd6 +size 1064 diff --git a/checkpoint-400/trainer_state.json b/checkpoint-400/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..12dfacf1fd1eb01715749e544f23de16c4c7d756 --- /dev/null +++ b/checkpoint-400/trainer_state.json @@ -0,0 +1,245 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.5333333333333333, + "eval_steps": 25, + "global_step": 400, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.03, + "learning_rate": 2.3797595190380762e-05, + "loss": 3.5104, + "step": 25 + }, + { + "epoch": 0.03, + "eval_loss": 2.643505334854126, + "eval_runtime": 13.8992, + "eval_samples_per_second": 17.987, + "eval_steps_per_second": 2.302, + "step": 25 + }, + { + "epoch": 0.07, + "learning_rate": 2.2545090180360722e-05, + "loss": 2.2052, + "step": 50 + }, + { + "epoch": 0.07, + "eval_loss": 1.966366171836853, + "eval_runtime": 13.6092, + "eval_samples_per_second": 18.37, + "eval_steps_per_second": 2.351, + "step": 50 + }, + { + "epoch": 0.1, + "learning_rate": 2.1292585170340683e-05, + "loss": 1.81, + "step": 75 + }, + { + "epoch": 0.1, + "eval_loss": 1.8134992122650146, + "eval_runtime": 13.5125, + "eval_samples_per_second": 18.501, + "eval_steps_per_second": 2.368, + "step": 75 + }, + { + "epoch": 0.13, + "learning_rate": 2.0040080160320643e-05, + "loss": 1.705, + "step": 100 + }, + { + "epoch": 0.13, + "eval_loss": 1.7801309823989868, + "eval_runtime": 13.4868, + "eval_samples_per_second": 18.537, + "eval_steps_per_second": 2.373, + "step": 100 + }, + { + "epoch": 0.17, + "learning_rate": 1.87875751503006e-05, + "loss": 1.6288, + "step": 125 + }, + { + "epoch": 0.17, + "eval_loss": 1.7698311805725098, + "eval_runtime": 13.6256, + "eval_samples_per_second": 18.348, + "eval_steps_per_second": 2.349, + "step": 125 + }, + { + "epoch": 0.2, + "learning_rate": 1.7535070140280564e-05, + "loss": 1.7262, + "step": 150 + }, + { + "epoch": 0.2, + "eval_loss": 1.7579054832458496, + "eval_runtime": 13.6761, + "eval_samples_per_second": 18.28, + "eval_steps_per_second": 2.34, + "step": 150 + }, + { + "epoch": 0.23, + "learning_rate": 1.628256513026052e-05, + "loss": 1.6657, + "step": 175 + }, + { + "epoch": 0.23, + "eval_loss": 1.7578012943267822, + "eval_runtime": 13.4284, + "eval_samples_per_second": 18.617, + "eval_steps_per_second": 2.383, + "step": 175 + }, + { + "epoch": 0.27, + "learning_rate": 
1.5030060120240483e-05, + "loss": 1.6197, + "step": 200 + }, + { + "epoch": 0.27, + "eval_loss": 1.7506276369094849, + "eval_runtime": 13.5018, + "eval_samples_per_second": 18.516, + "eval_steps_per_second": 2.37, + "step": 200 + }, + { + "epoch": 0.3, + "learning_rate": 1.3777555110220442e-05, + "loss": 1.7643, + "step": 225 + }, + { + "epoch": 0.3, + "eval_loss": 1.745801568031311, + "eval_runtime": 13.5003, + "eval_samples_per_second": 18.518, + "eval_steps_per_second": 2.37, + "step": 225 + }, + { + "epoch": 0.33, + "learning_rate": 1.25250501002004e-05, + "loss": 1.6707, + "step": 250 + }, + { + "epoch": 0.33, + "eval_loss": 1.7450388669967651, + "eval_runtime": 13.3223, + "eval_samples_per_second": 18.765, + "eval_steps_per_second": 2.402, + "step": 250 + }, + { + "epoch": 0.37, + "learning_rate": 1.1272545090180361e-05, + "loss": 1.6439, + "step": 275 + }, + { + "epoch": 0.37, + "eval_loss": 1.7427024841308594, + "eval_runtime": 13.3162, + "eval_samples_per_second": 18.774, + "eval_steps_per_second": 2.403, + "step": 275 + }, + { + "epoch": 0.4, + "learning_rate": 1.0020040080160322e-05, + "loss": 1.5697, + "step": 300 + }, + { + "epoch": 0.4, + "eval_loss": 1.7423481941223145, + "eval_runtime": 13.6598, + "eval_samples_per_second": 18.302, + "eval_steps_per_second": 2.343, + "step": 300 + }, + { + "epoch": 0.43, + "learning_rate": 8.767535070140282e-06, + "loss": 1.6362, + "step": 325 + }, + { + "epoch": 0.43, + "eval_loss": 1.7418254613876343, + "eval_runtime": 13.299, + "eval_samples_per_second": 18.798, + "eval_steps_per_second": 2.406, + "step": 325 + }, + { + "epoch": 0.47, + "learning_rate": 7.515030060120242e-06, + "loss": 1.607, + "step": 350 + }, + { + "epoch": 0.47, + "eval_loss": 1.738572120666504, + "eval_runtime": 13.7472, + "eval_samples_per_second": 18.186, + "eval_steps_per_second": 2.328, + "step": 350 + }, + { + "epoch": 0.5, + "learning_rate": 6.2625250501002e-06, + "loss": 1.5168, + "step": 375 + }, + { + "epoch": 0.5, + "eval_loss": 1.7354249954223633, + "eval_runtime": 13.5456, + "eval_samples_per_second": 18.456, + "eval_steps_per_second": 2.362, + "step": 375 + }, + { + "epoch": 0.53, + "learning_rate": 5.010020040080161e-06, + "loss": 1.6947, + "step": 400 + }, + { + "epoch": 0.53, + "eval_loss": 1.7342841625213623, + "eval_runtime": 13.8125, + "eval_samples_per_second": 18.1, + "eval_steps_per_second": 2.317, + "step": 400 + } + ], + "logging_steps": 25, + "max_steps": 500, + "num_input_tokens_seen": 0, + "num_train_epochs": 1, + "save_steps": 25, + "total_flos": 6573652967424000.0, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +} diff --git a/checkpoint-400/training_args.bin b/checkpoint-400/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..325e70156f50a1004dd17f03c6a72eae34ab817a --- /dev/null +++ b/checkpoint-400/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f539f4dad423afd20fdebce05e8744780e9ed30b55cfb57c0e5421e453ed0ea1 +size 4728 diff --git a/checkpoint-425/README.md b/checkpoint-425/README.md new file mode 100644 index 0000000000000000000000000000000000000000..de605040afe981efd7347112a2bd3d9ddfb19684 --- /dev/null +++ b/checkpoint-425/README.md @@ -0,0 +1,204 @@ +--- +library_name: peft +base_model: microsoft/phi-2 +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More 
Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. + +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
+ +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] + + +### Framework versions + +- PEFT 0.7.2.dev0 \ No newline at end of file diff --git a/checkpoint-425/adapter_config.json b/checkpoint-425/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2db55fa42720f3e778398fabefd1c209426aaf4 --- /dev/null +++ b/checkpoint-425/adapter_config.json @@ -0,0 +1,28 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "microsoft/phi-2", + "bias": "none", + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 64, + "lora_dropout": 0.05, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 32, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "fc1", + "fc2", + "Wqkv" + ], + "task_type": "CAUSAL_LM", + "use_rslora": false +} \ No newline at end of file diff --git a/checkpoint-425/adapter_model.safetensors b/checkpoint-425/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..9d0c6ad053ae23180bf3f2d8b92220f4336a598f --- /dev/null +++ b/checkpoint-425/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88c8479fe5f456830dc7d6ca2738476f377e42e9838006e059896c6da6901a94 +size 104873776 diff --git a/checkpoint-425/optimizer.pt b/checkpoint-425/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..bf66a55978ca4cbeb8a18a3ede7b7243ef645518 --- /dev/null +++ b/checkpoint-425/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e026b4d79aa820da16e38cfdab0baaa5c4f6ee22755876bd34ff418133e7d96b +size 52680506 diff --git a/checkpoint-425/rng_state.pth b/checkpoint-425/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..19ea503e42b8181e296b17ff95ee873d00a2b864 --- /dev/null +++ b/checkpoint-425/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c071352034922270a8990d7c33137528e09bc02ef06a87d9c9ccf26bab0c72cd +size 14244 diff --git a/checkpoint-425/scheduler.pt b/checkpoint-425/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..54f92c32f774d1457d393f634e610d8e59d7713a --- /dev/null +++ b/checkpoint-425/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:504bab57ab5a16c2f9362855448c874e5fdadaf20d320bb813801ae0e6affd1f +size 1064 diff --git a/checkpoint-425/trainer_state.json b/checkpoint-425/trainer_state.json new file mode 100644 index 
0000000000000000000000000000000000000000..c50f11d8b5dd3bdb4ad25cbd78a7d0307fdddffa --- /dev/null +++ b/checkpoint-425/trainer_state.json @@ -0,0 +1,259 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.5666666666666667, + "eval_steps": 25, + "global_step": 425, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.03, + "learning_rate": 2.3797595190380762e-05, + "loss": 3.5104, + "step": 25 + }, + { + "epoch": 0.03, + "eval_loss": 2.643505334854126, + "eval_runtime": 13.8992, + "eval_samples_per_second": 17.987, + "eval_steps_per_second": 2.302, + "step": 25 + }, + { + "epoch": 0.07, + "learning_rate": 2.2545090180360722e-05, + "loss": 2.2052, + "step": 50 + }, + { + "epoch": 0.07, + "eval_loss": 1.966366171836853, + "eval_runtime": 13.6092, + "eval_samples_per_second": 18.37, + "eval_steps_per_second": 2.351, + "step": 50 + }, + { + "epoch": 0.1, + "learning_rate": 2.1292585170340683e-05, + "loss": 1.81, + "step": 75 + }, + { + "epoch": 0.1, + "eval_loss": 1.8134992122650146, + "eval_runtime": 13.5125, + "eval_samples_per_second": 18.501, + "eval_steps_per_second": 2.368, + "step": 75 + }, + { + "epoch": 0.13, + "learning_rate": 2.0040080160320643e-05, + "loss": 1.705, + "step": 100 + }, + { + "epoch": 0.13, + "eval_loss": 1.7801309823989868, + "eval_runtime": 13.4868, + "eval_samples_per_second": 18.537, + "eval_steps_per_second": 2.373, + "step": 100 + }, + { + "epoch": 0.17, + "learning_rate": 1.87875751503006e-05, + "loss": 1.6288, + "step": 125 + }, + { + "epoch": 0.17, + "eval_loss": 1.7698311805725098, + "eval_runtime": 13.6256, + "eval_samples_per_second": 18.348, + "eval_steps_per_second": 2.349, + "step": 125 + }, + { + "epoch": 0.2, + "learning_rate": 1.7535070140280564e-05, + "loss": 1.7262, + "step": 150 + }, + { + "epoch": 0.2, + "eval_loss": 1.7579054832458496, + "eval_runtime": 13.6761, + "eval_samples_per_second": 18.28, + "eval_steps_per_second": 2.34, + "step": 150 + }, + { + "epoch": 0.23, + "learning_rate": 1.628256513026052e-05, + "loss": 1.6657, + "step": 175 + }, + { + "epoch": 0.23, + "eval_loss": 1.7578012943267822, + "eval_runtime": 13.4284, + "eval_samples_per_second": 18.617, + "eval_steps_per_second": 2.383, + "step": 175 + }, + { + "epoch": 0.27, + "learning_rate": 1.5030060120240483e-05, + "loss": 1.6197, + "step": 200 + }, + { + "epoch": 0.27, + "eval_loss": 1.7506276369094849, + "eval_runtime": 13.5018, + "eval_samples_per_second": 18.516, + "eval_steps_per_second": 2.37, + "step": 200 + }, + { + "epoch": 0.3, + "learning_rate": 1.3777555110220442e-05, + "loss": 1.7643, + "step": 225 + }, + { + "epoch": 0.3, + "eval_loss": 1.745801568031311, + "eval_runtime": 13.5003, + "eval_samples_per_second": 18.518, + "eval_steps_per_second": 2.37, + "step": 225 + }, + { + "epoch": 0.33, + "learning_rate": 1.25250501002004e-05, + "loss": 1.6707, + "step": 250 + }, + { + "epoch": 0.33, + "eval_loss": 1.7450388669967651, + "eval_runtime": 13.3223, + "eval_samples_per_second": 18.765, + "eval_steps_per_second": 2.402, + "step": 250 + }, + { + "epoch": 0.37, + "learning_rate": 1.1272545090180361e-05, + "loss": 1.6439, + "step": 275 + }, + { + "epoch": 0.37, + "eval_loss": 1.7427024841308594, + "eval_runtime": 13.3162, + "eval_samples_per_second": 18.774, + "eval_steps_per_second": 2.403, + "step": 275 + }, + { + "epoch": 0.4, + "learning_rate": 1.0020040080160322e-05, + "loss": 1.5697, + "step": 300 + }, + { + "epoch": 0.4, + "eval_loss": 1.7423481941223145, + 
"eval_runtime": 13.6598, + "eval_samples_per_second": 18.302, + "eval_steps_per_second": 2.343, + "step": 300 + }, + { + "epoch": 0.43, + "learning_rate": 8.767535070140282e-06, + "loss": 1.6362, + "step": 325 + }, + { + "epoch": 0.43, + "eval_loss": 1.7418254613876343, + "eval_runtime": 13.299, + "eval_samples_per_second": 18.798, + "eval_steps_per_second": 2.406, + "step": 325 + }, + { + "epoch": 0.47, + "learning_rate": 7.515030060120242e-06, + "loss": 1.607, + "step": 350 + }, + { + "epoch": 0.47, + "eval_loss": 1.738572120666504, + "eval_runtime": 13.7472, + "eval_samples_per_second": 18.186, + "eval_steps_per_second": 2.328, + "step": 350 + }, + { + "epoch": 0.5, + "learning_rate": 6.2625250501002e-06, + "loss": 1.5168, + "step": 375 + }, + { + "epoch": 0.5, + "eval_loss": 1.7354249954223633, + "eval_runtime": 13.5456, + "eval_samples_per_second": 18.456, + "eval_steps_per_second": 2.362, + "step": 375 + }, + { + "epoch": 0.53, + "learning_rate": 5.010020040080161e-06, + "loss": 1.6947, + "step": 400 + }, + { + "epoch": 0.53, + "eval_loss": 1.7342841625213623, + "eval_runtime": 13.8125, + "eval_samples_per_second": 18.1, + "eval_steps_per_second": 2.317, + "step": 400 + }, + { + "epoch": 0.57, + "learning_rate": 3.757515030060121e-06, + "loss": 1.6161, + "step": 425 + }, + { + "epoch": 0.57, + "eval_loss": 1.7329128980636597, + "eval_runtime": 13.8207, + "eval_samples_per_second": 18.089, + "eval_steps_per_second": 2.315, + "step": 425 + } + ], + "logging_steps": 25, + "max_steps": 500, + "num_input_tokens_seen": 0, + "num_train_epochs": 1, + "save_steps": 25, + "total_flos": 6984506277888000.0, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +} diff --git a/checkpoint-425/training_args.bin b/checkpoint-425/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..325e70156f50a1004dd17f03c6a72eae34ab817a --- /dev/null +++ b/checkpoint-425/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f539f4dad423afd20fdebce05e8744780e9ed30b55cfb57c0e5421e453ed0ea1 +size 4728 diff --git a/checkpoint-450/README.md b/checkpoint-450/README.md new file mode 100644 index 0000000000000000000000000000000000000000..de605040afe981efd7347112a2bd3d9ddfb19684 --- /dev/null +++ b/checkpoint-450/README.md @@ -0,0 +1,204 @@ +--- +library_name: peft +base_model: microsoft/phi-2 +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. 
+ +## How to Get Started with the Model + +Use the code below to get started with the model. + +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). + +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] + + +### Framework versions + +- PEFT 0.7.2.dev0 \ No newline at end of file diff --git a/checkpoint-450/adapter_config.json b/checkpoint-450/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2db55fa42720f3e778398fabefd1c209426aaf4 --- /dev/null +++ b/checkpoint-450/adapter_config.json @@ -0,0 +1,28 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "microsoft/phi-2", + "bias": "none", + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 64, + "lora_dropout": 0.05, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 32, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "fc1", + "fc2", + "Wqkv" + ], + "task_type": "CAUSAL_LM", + "use_rslora": false +} \ No newline at end of file diff --git a/checkpoint-450/adapter_model.safetensors b/checkpoint-450/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..3023a7d747d30170f68979915e8e76c82fd0c766 --- /dev/null +++ b/checkpoint-450/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0800e7db6191ed70558afbfbec431e04ec8dfedcf7c5a19b229f8dc1edf46c3 +size 104873776 diff --git a/checkpoint-450/optimizer.pt b/checkpoint-450/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..0141fcad061f1f9f6959de7eb42320fef14921fe --- /dev/null +++ b/checkpoint-450/optimizer.pt @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:cbc14ecacf60dddcdb1a06db6137a8466cf60aa1c26f1171496135de5fecbdf7 +size 52680506 diff --git a/checkpoint-450/rng_state.pth b/checkpoint-450/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..1f64e0a2db4e9e57c5f78a5f2e3949f4a34c5f3e --- /dev/null +++ b/checkpoint-450/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:685efe547fabbd545014d8ba562394aacbfc7da02eadf7df834920f91f0b4a2a +size 14244 diff --git a/checkpoint-450/scheduler.pt b/checkpoint-450/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..ceab89cc64627d3f15ad5078df5083e32867d819 --- /dev/null +++ b/checkpoint-450/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51a5430b12c48a76a1e6dc4f918563bb5ee76625f3646d55d31be91e2633c98d +size 1064 diff --git a/checkpoint-450/trainer_state.json b/checkpoint-450/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..f8276f185848cc43748840651680414f08f28ee7 --- /dev/null +++ b/checkpoint-450/trainer_state.json @@ -0,0 +1,273 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.6, + "eval_steps": 25, + "global_step": 450, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.03, + "learning_rate": 2.3797595190380762e-05, + "loss": 3.5104, + "step": 25 + }, + { + "epoch": 0.03, + "eval_loss": 2.643505334854126, + "eval_runtime": 13.8992, + "eval_samples_per_second": 17.987, + "eval_steps_per_second": 2.302, + "step": 25 + }, + { + "epoch": 0.07, + "learning_rate": 2.2545090180360722e-05, + "loss": 2.2052, + "step": 50 + }, + { + "epoch": 0.07, + "eval_loss": 1.966366171836853, + "eval_runtime": 13.6092, + "eval_samples_per_second": 18.37, + "eval_steps_per_second": 2.351, + "step": 50 + }, + { + "epoch": 0.1, + "learning_rate": 2.1292585170340683e-05, + "loss": 1.81, + "step": 75 + }, + { + "epoch": 0.1, + "eval_loss": 1.8134992122650146, + "eval_runtime": 13.5125, + "eval_samples_per_second": 18.501, + "eval_steps_per_second": 2.368, + "step": 75 + }, + { + "epoch": 0.13, + "learning_rate": 2.0040080160320643e-05, + "loss": 1.705, + "step": 100 + }, + { + "epoch": 0.13, + "eval_loss": 1.7801309823989868, + "eval_runtime": 13.4868, + "eval_samples_per_second": 18.537, + "eval_steps_per_second": 2.373, + "step": 100 + }, + { + "epoch": 0.17, + "learning_rate": 1.87875751503006e-05, + "loss": 1.6288, + "step": 125 + }, + { + "epoch": 0.17, + "eval_loss": 1.7698311805725098, + "eval_runtime": 13.6256, + "eval_samples_per_second": 18.348, + "eval_steps_per_second": 2.349, + "step": 125 + }, + { + "epoch": 0.2, + "learning_rate": 1.7535070140280564e-05, + "loss": 1.7262, + "step": 150 + }, + { + "epoch": 0.2, + "eval_loss": 1.7579054832458496, + "eval_runtime": 13.6761, + "eval_samples_per_second": 18.28, + "eval_steps_per_second": 2.34, + "step": 150 + }, + { + "epoch": 0.23, + "learning_rate": 1.628256513026052e-05, + "loss": 1.6657, + "step": 175 + }, + { + "epoch": 0.23, + "eval_loss": 1.7578012943267822, + "eval_runtime": 13.4284, + "eval_samples_per_second": 18.617, + "eval_steps_per_second": 2.383, + "step": 175 + }, + { + "epoch": 0.27, + "learning_rate": 1.5030060120240483e-05, + "loss": 1.6197, + "step": 200 + }, + { + "epoch": 0.27, + "eval_loss": 1.7506276369094849, + "eval_runtime": 13.5018, + "eval_samples_per_second": 18.516, + "eval_steps_per_second": 2.37, + "step": 200 + 
}, + { + "epoch": 0.3, + "learning_rate": 1.3777555110220442e-05, + "loss": 1.7643, + "step": 225 + }, + { + "epoch": 0.3, + "eval_loss": 1.745801568031311, + "eval_runtime": 13.5003, + "eval_samples_per_second": 18.518, + "eval_steps_per_second": 2.37, + "step": 225 + }, + { + "epoch": 0.33, + "learning_rate": 1.25250501002004e-05, + "loss": 1.6707, + "step": 250 + }, + { + "epoch": 0.33, + "eval_loss": 1.7450388669967651, + "eval_runtime": 13.3223, + "eval_samples_per_second": 18.765, + "eval_steps_per_second": 2.402, + "step": 250 + }, + { + "epoch": 0.37, + "learning_rate": 1.1272545090180361e-05, + "loss": 1.6439, + "step": 275 + }, + { + "epoch": 0.37, + "eval_loss": 1.7427024841308594, + "eval_runtime": 13.3162, + "eval_samples_per_second": 18.774, + "eval_steps_per_second": 2.403, + "step": 275 + }, + { + "epoch": 0.4, + "learning_rate": 1.0020040080160322e-05, + "loss": 1.5697, + "step": 300 + }, + { + "epoch": 0.4, + "eval_loss": 1.7423481941223145, + "eval_runtime": 13.6598, + "eval_samples_per_second": 18.302, + "eval_steps_per_second": 2.343, + "step": 300 + }, + { + "epoch": 0.43, + "learning_rate": 8.767535070140282e-06, + "loss": 1.6362, + "step": 325 + }, + { + "epoch": 0.43, + "eval_loss": 1.7418254613876343, + "eval_runtime": 13.299, + "eval_samples_per_second": 18.798, + "eval_steps_per_second": 2.406, + "step": 325 + }, + { + "epoch": 0.47, + "learning_rate": 7.515030060120242e-06, + "loss": 1.607, + "step": 350 + }, + { + "epoch": 0.47, + "eval_loss": 1.738572120666504, + "eval_runtime": 13.7472, + "eval_samples_per_second": 18.186, + "eval_steps_per_second": 2.328, + "step": 350 + }, + { + "epoch": 0.5, + "learning_rate": 6.2625250501002e-06, + "loss": 1.5168, + "step": 375 + }, + { + "epoch": 0.5, + "eval_loss": 1.7354249954223633, + "eval_runtime": 13.5456, + "eval_samples_per_second": 18.456, + "eval_steps_per_second": 2.362, + "step": 375 + }, + { + "epoch": 0.53, + "learning_rate": 5.010020040080161e-06, + "loss": 1.6947, + "step": 400 + }, + { + "epoch": 0.53, + "eval_loss": 1.7342841625213623, + "eval_runtime": 13.8125, + "eval_samples_per_second": 18.1, + "eval_steps_per_second": 2.317, + "step": 400 + }, + { + "epoch": 0.57, + "learning_rate": 3.757515030060121e-06, + "loss": 1.6161, + "step": 425 + }, + { + "epoch": 0.57, + "eval_loss": 1.7329128980636597, + "eval_runtime": 13.8207, + "eval_samples_per_second": 18.089, + "eval_steps_per_second": 2.315, + "step": 425 + }, + { + "epoch": 0.6, + "learning_rate": 2.5050100200400804e-06, + "loss": 1.6127, + "step": 450 + }, + { + "epoch": 0.6, + "eval_loss": 1.7326769828796387, + "eval_runtime": 13.8679, + "eval_samples_per_second": 18.027, + "eval_steps_per_second": 2.307, + "step": 450 + } + ], + "logging_steps": 25, + "max_steps": 500, + "num_input_tokens_seen": 0, + "num_train_epochs": 1, + "save_steps": 25, + "total_flos": 7395359588352000.0, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +} diff --git a/checkpoint-450/training_args.bin b/checkpoint-450/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..325e70156f50a1004dd17f03c6a72eae34ab817a --- /dev/null +++ b/checkpoint-450/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f539f4dad423afd20fdebce05e8744780e9ed30b55cfb57c0e5421e453ed0ea1 +size 4728 diff --git a/checkpoint-475/README.md b/checkpoint-475/README.md new file mode 100644 index 0000000000000000000000000000000000000000..de605040afe981efd7347112a2bd3d9ddfb19684 --- /dev/null +++ 
b/checkpoint-475/README.md @@ -0,0 +1,204 @@ +--- +library_name: peft +base_model: microsoft/phi-2 +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. + +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
+ +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] + + +### Framework versions + +- PEFT 0.7.2.dev0 \ No newline at end of file diff --git a/checkpoint-475/adapter_config.json b/checkpoint-475/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2db55fa42720f3e778398fabefd1c209426aaf4 --- /dev/null +++ b/checkpoint-475/adapter_config.json @@ -0,0 +1,28 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "microsoft/phi-2", + "bias": "none", + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 64, + "lora_dropout": 0.05, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 32, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "fc1", + "fc2", + "Wqkv" + ], + "task_type": "CAUSAL_LM", + "use_rslora": false +} \ No newline at end of file diff --git a/checkpoint-475/adapter_model.safetensors b/checkpoint-475/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..fb96574e60492adb5e9d0a2b8e360259ccae325f --- /dev/null +++ b/checkpoint-475/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f87543c033c008fbe16c1958b2b335cb8ea753dae57f49c0a697df2559893e9e +size 104873776 diff --git a/checkpoint-475/optimizer.pt b/checkpoint-475/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..b3f0f51f8393a2a25b148401a6fcea0904a3be68 --- /dev/null +++ b/checkpoint-475/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45df7677f37ba5547a60ed487526cf0e94e833f8431c2333496788fe0629bc5f +size 52680506 diff --git a/checkpoint-475/rng_state.pth b/checkpoint-475/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..c9f9c01c469aba88495f613c72c96111e99d0558 --- /dev/null +++ b/checkpoint-475/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70a66a812c304a4d0d93b9a37d1bc859532741546e0d30069737c8d222532b9b +size 14244 diff --git a/checkpoint-475/scheduler.pt b/checkpoint-475/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..d5ab1965bb9f28ca5cd8179533c5b8e5f25ed5db --- /dev/null +++ b/checkpoint-475/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e62a1b156546b74e69387fcb3953db05f910e37a5ece6ce413024af6ae9ae66 +size 1064 diff --git a/checkpoint-475/trainer_state.json b/checkpoint-475/trainer_state.json new file mode 100644 index 
0000000000000000000000000000000000000000..1f546d1ff5174d046e369876cc255cdde88ffd9f --- /dev/null +++ b/checkpoint-475/trainer_state.json @@ -0,0 +1,287 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.6333333333333333, + "eval_steps": 25, + "global_step": 475, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.03, + "learning_rate": 2.3797595190380762e-05, + "loss": 3.5104, + "step": 25 + }, + { + "epoch": 0.03, + "eval_loss": 2.643505334854126, + "eval_runtime": 13.8992, + "eval_samples_per_second": 17.987, + "eval_steps_per_second": 2.302, + "step": 25 + }, + { + "epoch": 0.07, + "learning_rate": 2.2545090180360722e-05, + "loss": 2.2052, + "step": 50 + }, + { + "epoch": 0.07, + "eval_loss": 1.966366171836853, + "eval_runtime": 13.6092, + "eval_samples_per_second": 18.37, + "eval_steps_per_second": 2.351, + "step": 50 + }, + { + "epoch": 0.1, + "learning_rate": 2.1292585170340683e-05, + "loss": 1.81, + "step": 75 + }, + { + "epoch": 0.1, + "eval_loss": 1.8134992122650146, + "eval_runtime": 13.5125, + "eval_samples_per_second": 18.501, + "eval_steps_per_second": 2.368, + "step": 75 + }, + { + "epoch": 0.13, + "learning_rate": 2.0040080160320643e-05, + "loss": 1.705, + "step": 100 + }, + { + "epoch": 0.13, + "eval_loss": 1.7801309823989868, + "eval_runtime": 13.4868, + "eval_samples_per_second": 18.537, + "eval_steps_per_second": 2.373, + "step": 100 + }, + { + "epoch": 0.17, + "learning_rate": 1.87875751503006e-05, + "loss": 1.6288, + "step": 125 + }, + { + "epoch": 0.17, + "eval_loss": 1.7698311805725098, + "eval_runtime": 13.6256, + "eval_samples_per_second": 18.348, + "eval_steps_per_second": 2.349, + "step": 125 + }, + { + "epoch": 0.2, + "learning_rate": 1.7535070140280564e-05, + "loss": 1.7262, + "step": 150 + }, + { + "epoch": 0.2, + "eval_loss": 1.7579054832458496, + "eval_runtime": 13.6761, + "eval_samples_per_second": 18.28, + "eval_steps_per_second": 2.34, + "step": 150 + }, + { + "epoch": 0.23, + "learning_rate": 1.628256513026052e-05, + "loss": 1.6657, + "step": 175 + }, + { + "epoch": 0.23, + "eval_loss": 1.7578012943267822, + "eval_runtime": 13.4284, + "eval_samples_per_second": 18.617, + "eval_steps_per_second": 2.383, + "step": 175 + }, + { + "epoch": 0.27, + "learning_rate": 1.5030060120240483e-05, + "loss": 1.6197, + "step": 200 + }, + { + "epoch": 0.27, + "eval_loss": 1.7506276369094849, + "eval_runtime": 13.5018, + "eval_samples_per_second": 18.516, + "eval_steps_per_second": 2.37, + "step": 200 + }, + { + "epoch": 0.3, + "learning_rate": 1.3777555110220442e-05, + "loss": 1.7643, + "step": 225 + }, + { + "epoch": 0.3, + "eval_loss": 1.745801568031311, + "eval_runtime": 13.5003, + "eval_samples_per_second": 18.518, + "eval_steps_per_second": 2.37, + "step": 225 + }, + { + "epoch": 0.33, + "learning_rate": 1.25250501002004e-05, + "loss": 1.6707, + "step": 250 + }, + { + "epoch": 0.33, + "eval_loss": 1.7450388669967651, + "eval_runtime": 13.3223, + "eval_samples_per_second": 18.765, + "eval_steps_per_second": 2.402, + "step": 250 + }, + { + "epoch": 0.37, + "learning_rate": 1.1272545090180361e-05, + "loss": 1.6439, + "step": 275 + }, + { + "epoch": 0.37, + "eval_loss": 1.7427024841308594, + "eval_runtime": 13.3162, + "eval_samples_per_second": 18.774, + "eval_steps_per_second": 2.403, + "step": 275 + }, + { + "epoch": 0.4, + "learning_rate": 1.0020040080160322e-05, + "loss": 1.5697, + "step": 300 + }, + { + "epoch": 0.4, + "eval_loss": 1.7423481941223145, + 
"eval_runtime": 13.6598, + "eval_samples_per_second": 18.302, + "eval_steps_per_second": 2.343, + "step": 300 + }, + { + "epoch": 0.43, + "learning_rate": 8.767535070140282e-06, + "loss": 1.6362, + "step": 325 + }, + { + "epoch": 0.43, + "eval_loss": 1.7418254613876343, + "eval_runtime": 13.299, + "eval_samples_per_second": 18.798, + "eval_steps_per_second": 2.406, + "step": 325 + }, + { + "epoch": 0.47, + "learning_rate": 7.515030060120242e-06, + "loss": 1.607, + "step": 350 + }, + { + "epoch": 0.47, + "eval_loss": 1.738572120666504, + "eval_runtime": 13.7472, + "eval_samples_per_second": 18.186, + "eval_steps_per_second": 2.328, + "step": 350 + }, + { + "epoch": 0.5, + "learning_rate": 6.2625250501002e-06, + "loss": 1.5168, + "step": 375 + }, + { + "epoch": 0.5, + "eval_loss": 1.7354249954223633, + "eval_runtime": 13.5456, + "eval_samples_per_second": 18.456, + "eval_steps_per_second": 2.362, + "step": 375 + }, + { + "epoch": 0.53, + "learning_rate": 5.010020040080161e-06, + "loss": 1.6947, + "step": 400 + }, + { + "epoch": 0.53, + "eval_loss": 1.7342841625213623, + "eval_runtime": 13.8125, + "eval_samples_per_second": 18.1, + "eval_steps_per_second": 2.317, + "step": 400 + }, + { + "epoch": 0.57, + "learning_rate": 3.757515030060121e-06, + "loss": 1.6161, + "step": 425 + }, + { + "epoch": 0.57, + "eval_loss": 1.7329128980636597, + "eval_runtime": 13.8207, + "eval_samples_per_second": 18.089, + "eval_steps_per_second": 2.315, + "step": 425 + }, + { + "epoch": 0.6, + "learning_rate": 2.5050100200400804e-06, + "loss": 1.6127, + "step": 450 + }, + { + "epoch": 0.6, + "eval_loss": 1.7326769828796387, + "eval_runtime": 13.8679, + "eval_samples_per_second": 18.027, + "eval_steps_per_second": 2.307, + "step": 450 + }, + { + "epoch": 0.63, + "learning_rate": 1.2525050100200402e-06, + "loss": 1.5995, + "step": 475 + }, + { + "epoch": 0.63, + "eval_loss": 1.733328104019165, + "eval_runtime": 13.6867, + "eval_samples_per_second": 18.266, + "eval_steps_per_second": 2.338, + "step": 475 + } + ], + "logging_steps": 25, + "max_steps": 500, + "num_input_tokens_seen": 0, + "num_train_epochs": 1, + "save_steps": 25, + "total_flos": 7806212898816000.0, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +} diff --git a/checkpoint-475/training_args.bin b/checkpoint-475/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..325e70156f50a1004dd17f03c6a72eae34ab817a --- /dev/null +++ b/checkpoint-475/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f539f4dad423afd20fdebce05e8744780e9ed30b55cfb57c0e5421e453ed0ea1 +size 4728 diff --git a/checkpoint-50/README.md b/checkpoint-50/README.md new file mode 100644 index 0000000000000000000000000000000000000000..de605040afe981efd7347112a2bd3d9ddfb19684 --- /dev/null +++ b/checkpoint-50/README.md @@ -0,0 +1,204 @@ +--- +library_name: peft +base_model: microsoft/phi-2 +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More 
Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. + +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). + +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] + + +### Framework versions + +- PEFT 0.7.2.dev0 \ No newline at end of file diff --git a/checkpoint-50/adapter_config.json b/checkpoint-50/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2db55fa42720f3e778398fabefd1c209426aaf4 --- /dev/null +++ b/checkpoint-50/adapter_config.json @@ -0,0 +1,28 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "microsoft/phi-2", + "bias": "none", + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 64, + "lora_dropout": 0.05, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 32, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "fc1", + "fc2", + "Wqkv" + ], + "task_type": "CAUSAL_LM", + "use_rslora": false +} \ No newline at end of file diff --git a/checkpoint-50/adapter_model.safetensors b/checkpoint-50/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..612090bd97d3c1d2374a4b1fcf95029ccf570bc7 --- /dev/null +++ 
b/checkpoint-50/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53a75b4186952dab928cac09ff034f3e72dabd259648c1eea200fdde81594a3d +size 104873776 diff --git a/checkpoint-50/optimizer.pt b/checkpoint-50/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..8622c5f84a43da086f70831c612575718f03eefa --- /dev/null +++ b/checkpoint-50/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:103f0368df558d7b856db300c8c3e6b31d32a424702871968bec6aeb1039ddf3 +size 52680378 diff --git a/checkpoint-50/rng_state.pth b/checkpoint-50/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..b57aa395aa8325bb7f5843dabf61958fa33c10a0 --- /dev/null +++ b/checkpoint-50/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d838e12c1191d1341ff6f2f5c45200aaa685dabe29dd02ad1381a406ea211050 +size 14244 diff --git a/checkpoint-50/scheduler.pt b/checkpoint-50/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..0c451a51faaeee2d01fa3ee6dba0b3afc87dc059 --- /dev/null +++ b/checkpoint-50/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80f59abeb3889cb25b6b24a6c4bd5cc79cf04f9ce6bffc47ca32471d35b90f2b +size 1064 diff --git a/checkpoint-50/trainer_state.json b/checkpoint-50/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..3e07354cc9820bd0c893f299d5975f708cbe89fa --- /dev/null +++ b/checkpoint-50/trainer_state.json @@ -0,0 +1,49 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.06666666666666667, + "eval_steps": 25, + "global_step": 50, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.03, + "learning_rate": 2.3797595190380762e-05, + "loss": 3.5104, + "step": 25 + }, + { + "epoch": 0.03, + "eval_loss": 2.643505334854126, + "eval_runtime": 13.8992, + "eval_samples_per_second": 17.987, + "eval_steps_per_second": 2.302, + "step": 25 + }, + { + "epoch": 0.07, + "learning_rate": 2.2545090180360722e-05, + "loss": 2.2052, + "step": 50 + }, + { + "epoch": 0.07, + "eval_loss": 1.966366171836853, + "eval_runtime": 13.6092, + "eval_samples_per_second": 18.37, + "eval_steps_per_second": 2.351, + "step": 50 + } + ], + "logging_steps": 25, + "max_steps": 500, + "num_input_tokens_seen": 0, + "num_train_epochs": 1, + "save_steps": 25, + "total_flos": 821706620928000.0, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +} diff --git a/checkpoint-50/training_args.bin b/checkpoint-50/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..325e70156f50a1004dd17f03c6a72eae34ab817a --- /dev/null +++ b/checkpoint-50/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f539f4dad423afd20fdebce05e8744780e9ed30b55cfb57c0e5421e453ed0ea1 +size 4728 diff --git a/checkpoint-500/README.md b/checkpoint-500/README.md new file mode 100644 index 0000000000000000000000000000000000000000..de605040afe981efd7347112a2bd3d9ddfb19684 --- /dev/null +++ b/checkpoint-500/README.md @@ -0,0 +1,204 @@ +--- +library_name: peft +base_model: microsoft/phi-2 +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model 
type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. + +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
+ +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] + + +### Framework versions + +- PEFT 0.7.2.dev0 \ No newline at end of file diff --git a/checkpoint-500/adapter_config.json b/checkpoint-500/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2db55fa42720f3e778398fabefd1c209426aaf4 --- /dev/null +++ b/checkpoint-500/adapter_config.json @@ -0,0 +1,28 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "microsoft/phi-2", + "bias": "none", + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 64, + "lora_dropout": 0.05, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 32, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "fc1", + "fc2", + "Wqkv" + ], + "task_type": "CAUSAL_LM", + "use_rslora": false +} \ No newline at end of file diff --git a/checkpoint-500/adapter_model.safetensors b/checkpoint-500/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..0d6a798f614efb3bdf9ba881d3f7e17ef9243943 --- /dev/null +++ b/checkpoint-500/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0686cbe507c5abc5b34ebdb522ad01558fcf73dd3a4728c5d4f8053630c6d16f +size 104873776 diff --git a/checkpoint-500/optimizer.pt b/checkpoint-500/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..13308b8046e2cf20e2d305f344c83dd90e910290 --- /dev/null +++ b/checkpoint-500/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3b89825fac51301a208422812c95794b65c67d050abcaf42afa56b845c1e71f +size 52680506 diff --git a/checkpoint-500/rng_state.pth b/checkpoint-500/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..d6d2237a8e2abce16f5165e68de268bf0c230823 --- /dev/null +++ b/checkpoint-500/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3514519d5746f43dc0503fc0fb144428f0bb4884e873a45dc794ea433664954f +size 14244 diff --git a/checkpoint-500/scheduler.pt b/checkpoint-500/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..b2daa8704d24b9dd3e85b36b335044b511f1096c --- /dev/null +++ b/checkpoint-500/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e8669b3e7778505d584766a7bee20546ae43a399da83ad08f5a9789e64cb201 +size 1064 diff --git a/checkpoint-500/trainer_state.json b/checkpoint-500/trainer_state.json new file mode 100644 index 
0000000000000000000000000000000000000000..a8ab0fc823c405328cf3e6d03fb6a5d54f213ef5 --- /dev/null +++ b/checkpoint-500/trainer_state.json @@ -0,0 +1,301 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.6666666666666666, + "eval_steps": 25, + "global_step": 500, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.03, + "learning_rate": 2.3797595190380762e-05, + "loss": 3.5104, + "step": 25 + }, + { + "epoch": 0.03, + "eval_loss": 2.643505334854126, + "eval_runtime": 13.8992, + "eval_samples_per_second": 17.987, + "eval_steps_per_second": 2.302, + "step": 25 + }, + { + "epoch": 0.07, + "learning_rate": 2.2545090180360722e-05, + "loss": 2.2052, + "step": 50 + }, + { + "epoch": 0.07, + "eval_loss": 1.966366171836853, + "eval_runtime": 13.6092, + "eval_samples_per_second": 18.37, + "eval_steps_per_second": 2.351, + "step": 50 + }, + { + "epoch": 0.1, + "learning_rate": 2.1292585170340683e-05, + "loss": 1.81, + "step": 75 + }, + { + "epoch": 0.1, + "eval_loss": 1.8134992122650146, + "eval_runtime": 13.5125, + "eval_samples_per_second": 18.501, + "eval_steps_per_second": 2.368, + "step": 75 + }, + { + "epoch": 0.13, + "learning_rate": 2.0040080160320643e-05, + "loss": 1.705, + "step": 100 + }, + { + "epoch": 0.13, + "eval_loss": 1.7801309823989868, + "eval_runtime": 13.4868, + "eval_samples_per_second": 18.537, + "eval_steps_per_second": 2.373, + "step": 100 + }, + { + "epoch": 0.17, + "learning_rate": 1.87875751503006e-05, + "loss": 1.6288, + "step": 125 + }, + { + "epoch": 0.17, + "eval_loss": 1.7698311805725098, + "eval_runtime": 13.6256, + "eval_samples_per_second": 18.348, + "eval_steps_per_second": 2.349, + "step": 125 + }, + { + "epoch": 0.2, + "learning_rate": 1.7535070140280564e-05, + "loss": 1.7262, + "step": 150 + }, + { + "epoch": 0.2, + "eval_loss": 1.7579054832458496, + "eval_runtime": 13.6761, + "eval_samples_per_second": 18.28, + "eval_steps_per_second": 2.34, + "step": 150 + }, + { + "epoch": 0.23, + "learning_rate": 1.628256513026052e-05, + "loss": 1.6657, + "step": 175 + }, + { + "epoch": 0.23, + "eval_loss": 1.7578012943267822, + "eval_runtime": 13.4284, + "eval_samples_per_second": 18.617, + "eval_steps_per_second": 2.383, + "step": 175 + }, + { + "epoch": 0.27, + "learning_rate": 1.5030060120240483e-05, + "loss": 1.6197, + "step": 200 + }, + { + "epoch": 0.27, + "eval_loss": 1.7506276369094849, + "eval_runtime": 13.5018, + "eval_samples_per_second": 18.516, + "eval_steps_per_second": 2.37, + "step": 200 + }, + { + "epoch": 0.3, + "learning_rate": 1.3777555110220442e-05, + "loss": 1.7643, + "step": 225 + }, + { + "epoch": 0.3, + "eval_loss": 1.745801568031311, + "eval_runtime": 13.5003, + "eval_samples_per_second": 18.518, + "eval_steps_per_second": 2.37, + "step": 225 + }, + { + "epoch": 0.33, + "learning_rate": 1.25250501002004e-05, + "loss": 1.6707, + "step": 250 + }, + { + "epoch": 0.33, + "eval_loss": 1.7450388669967651, + "eval_runtime": 13.3223, + "eval_samples_per_second": 18.765, + "eval_steps_per_second": 2.402, + "step": 250 + }, + { + "epoch": 0.37, + "learning_rate": 1.1272545090180361e-05, + "loss": 1.6439, + "step": 275 + }, + { + "epoch": 0.37, + "eval_loss": 1.7427024841308594, + "eval_runtime": 13.3162, + "eval_samples_per_second": 18.774, + "eval_steps_per_second": 2.403, + "step": 275 + }, + { + "epoch": 0.4, + "learning_rate": 1.0020040080160322e-05, + "loss": 1.5697, + "step": 300 + }, + { + "epoch": 0.4, + "eval_loss": 1.7423481941223145, + 
"eval_runtime": 13.6598, + "eval_samples_per_second": 18.302, + "eval_steps_per_second": 2.343, + "step": 300 + }, + { + "epoch": 0.43, + "learning_rate": 8.767535070140282e-06, + "loss": 1.6362, + "step": 325 + }, + { + "epoch": 0.43, + "eval_loss": 1.7418254613876343, + "eval_runtime": 13.299, + "eval_samples_per_second": 18.798, + "eval_steps_per_second": 2.406, + "step": 325 + }, + { + "epoch": 0.47, + "learning_rate": 7.515030060120242e-06, + "loss": 1.607, + "step": 350 + }, + { + "epoch": 0.47, + "eval_loss": 1.738572120666504, + "eval_runtime": 13.7472, + "eval_samples_per_second": 18.186, + "eval_steps_per_second": 2.328, + "step": 350 + }, + { + "epoch": 0.5, + "learning_rate": 6.2625250501002e-06, + "loss": 1.5168, + "step": 375 + }, + { + "epoch": 0.5, + "eval_loss": 1.7354249954223633, + "eval_runtime": 13.5456, + "eval_samples_per_second": 18.456, + "eval_steps_per_second": 2.362, + "step": 375 + }, + { + "epoch": 0.53, + "learning_rate": 5.010020040080161e-06, + "loss": 1.6947, + "step": 400 + }, + { + "epoch": 0.53, + "eval_loss": 1.7342841625213623, + "eval_runtime": 13.8125, + "eval_samples_per_second": 18.1, + "eval_steps_per_second": 2.317, + "step": 400 + }, + { + "epoch": 0.57, + "learning_rate": 3.757515030060121e-06, + "loss": 1.6161, + "step": 425 + }, + { + "epoch": 0.57, + "eval_loss": 1.7329128980636597, + "eval_runtime": 13.8207, + "eval_samples_per_second": 18.089, + "eval_steps_per_second": 2.315, + "step": 425 + }, + { + "epoch": 0.6, + "learning_rate": 2.5050100200400804e-06, + "loss": 1.6127, + "step": 450 + }, + { + "epoch": 0.6, + "eval_loss": 1.7326769828796387, + "eval_runtime": 13.8679, + "eval_samples_per_second": 18.027, + "eval_steps_per_second": 2.307, + "step": 450 + }, + { + "epoch": 0.63, + "learning_rate": 1.2525050100200402e-06, + "loss": 1.5995, + "step": 475 + }, + { + "epoch": 0.63, + "eval_loss": 1.733328104019165, + "eval_runtime": 13.6867, + "eval_samples_per_second": 18.266, + "eval_steps_per_second": 2.338, + "step": 475 + }, + { + "epoch": 0.67, + "learning_rate": 0.0, + "loss": 1.611, + "step": 500 + }, + { + "epoch": 0.67, + "eval_loss": 1.7329075336456299, + "eval_runtime": 13.645, + "eval_samples_per_second": 18.322, + "eval_steps_per_second": 2.345, + "step": 500 + } + ], + "logging_steps": 25, + "max_steps": 500, + "num_input_tokens_seen": 0, + "num_train_epochs": 1, + "save_steps": 25, + "total_flos": 8217066209280000.0, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +} diff --git a/checkpoint-500/training_args.bin b/checkpoint-500/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..325e70156f50a1004dd17f03c6a72eae34ab817a --- /dev/null +++ b/checkpoint-500/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f539f4dad423afd20fdebce05e8744780e9ed30b55cfb57c0e5421e453ed0ea1 +size 4728 diff --git a/checkpoint-75/README.md b/checkpoint-75/README.md new file mode 100644 index 0000000000000000000000000000000000000000..de605040afe981efd7347112a2bd3d9ddfb19684 --- /dev/null +++ b/checkpoint-75/README.md @@ -0,0 +1,204 @@ +--- +library_name: peft +base_model: microsoft/phi-2 +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More 
Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. + +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). + +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] + + +### Framework versions + +- PEFT 0.7.2.dev0 \ No newline at end of file diff --git a/checkpoint-75/adapter_config.json b/checkpoint-75/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2db55fa42720f3e778398fabefd1c209426aaf4 --- /dev/null +++ b/checkpoint-75/adapter_config.json @@ -0,0 +1,28 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "microsoft/phi-2", + "bias": "none", + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 64, + "lora_dropout": 0.05, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 32, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "fc1", + "fc2", + "Wqkv" + ], + "task_type": "CAUSAL_LM", + "use_rslora": false +} \ No newline at end 
of file diff --git a/checkpoint-75/adapter_model.safetensors b/checkpoint-75/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..52b3d3d55b00294ced38d9349f82815a0239f641 --- /dev/null +++ b/checkpoint-75/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3b3bcb5c266af5836931e4f87f79152e6d2d9aecd347fe6648ffcc662cccbeb +size 104873776 diff --git a/checkpoint-75/optimizer.pt b/checkpoint-75/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..2a45573803b2eb68a9de333232e05572a229e0a9 --- /dev/null +++ b/checkpoint-75/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d132c202e7ee17d086d362b8a1b32c8724ad910c0a97756883561490dd04a7a0 +size 52680378 diff --git a/checkpoint-75/rng_state.pth b/checkpoint-75/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..d119f9bb3759e4e6b9b2b593a9cdeaf0b58fdb08 --- /dev/null +++ b/checkpoint-75/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:477b2a0b9ecb7613e5b783feee1b1898ee2620927cd03bc568e83d2cf52635f0 +size 14244 diff --git a/checkpoint-75/scheduler.pt b/checkpoint-75/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..712fd50c9622831af5138bfbad2e78a813734888 --- /dev/null +++ b/checkpoint-75/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6bae9ac67ebeb2bb2143a2ce450a4df38425b5f402e73d79278e6987cbd86eb8 +size 1064 diff --git a/checkpoint-75/trainer_state.json b/checkpoint-75/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..ddb98d30523259b31f9b54b1be06d7524080cfec --- /dev/null +++ b/checkpoint-75/trainer_state.json @@ -0,0 +1,63 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.1, + "eval_steps": 25, + "global_step": 75, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.03, + "learning_rate": 2.3797595190380762e-05, + "loss": 3.5104, + "step": 25 + }, + { + "epoch": 0.03, + "eval_loss": 2.643505334854126, + "eval_runtime": 13.8992, + "eval_samples_per_second": 17.987, + "eval_steps_per_second": 2.302, + "step": 25 + }, + { + "epoch": 0.07, + "learning_rate": 2.2545090180360722e-05, + "loss": 2.2052, + "step": 50 + }, + { + "epoch": 0.07, + "eval_loss": 1.966366171836853, + "eval_runtime": 13.6092, + "eval_samples_per_second": 18.37, + "eval_steps_per_second": 2.351, + "step": 50 + }, + { + "epoch": 0.1, + "learning_rate": 2.1292585170340683e-05, + "loss": 1.81, + "step": 75 + }, + { + "epoch": 0.1, + "eval_loss": 1.8134992122650146, + "eval_runtime": 13.5125, + "eval_samples_per_second": 18.501, + "eval_steps_per_second": 2.368, + "step": 75 + } + ], + "logging_steps": 25, + "max_steps": 500, + "num_input_tokens_seen": 0, + "num_train_epochs": 1, + "save_steps": 25, + "total_flos": 1232559931392000.0, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +} diff --git a/checkpoint-75/training_args.bin b/checkpoint-75/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..325e70156f50a1004dd17f03c6a72eae34ab817a --- /dev/null +++ b/checkpoint-75/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f539f4dad423afd20fdebce05e8744780e9ed30b55cfb57c0e5421e453ed0ea1 +size 4728
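
The adapter_config.json entries above describe a LoRA adapter (r=32, lora_alpha=64, dropout 0.05) applied to the Wqkv, fc1 and fc2 modules of microsoft/phi-2, while the "How to Get Started with the Model" section of each model card is still a placeholder. A minimal sketch of loading one of these checkpoints for inference with PEFT is shown below; the local path `checkpoint-500`, the half-precision and device settings, and the prompt are assumptions, and `trust_remote_code=True` is included only because the `Wqkv`/`fc1`/`fc2` target names correspond to the original remote-code phi-2 implementation rather than the later native `transformers` port.

```python
# Minimal sketch (not part of the repository): load the microsoft/phi-2 base model
# and attach one of the LoRA checkpoints described by adapter_config.json.
# Assumptions: the adapter lives in ./checkpoint-500, accelerate is installed for
# device_map="auto", and fp16 fits on the available GPU.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "microsoft/phi-2"
adapter_dir = "checkpoint-500"  # any of the saved checkpoints should work

tokenizer = AutoTokenizer.from_pretrained(base_id, trust_remote_code=True)
base = AutoModelForCausalLM.from_pretrained(
    base_id,
    torch_dtype=torch.float16,   # assumption: half precision on a single GPU
    device_map="auto",
    trust_remote_code=True,      # Wqkv/fc1/fc2 match the remote-code phi-2 modules
)

# PeftModel reads adapter_config.json (r=32, alpha=64, targets Wqkv/fc1/fc2)
# and loads adapter_model.safetensors on top of the frozen base weights.
model = PeftModel.from_pretrained(base, adapter_dir)
model.eval()

prompt = "Explain what a LoRA adapter is in one sentence."
inputs = tokenizer(prompt, return_tensors="pt").to(base.device)
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```

If a standalone model is preferred over base-plus-adapter loading, PEFT's `merge_and_unload()` can fold the LoRA weights into the base model before saving, at the cost of losing the small-adapter format.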
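
The trainer_state.json files above log a 500-step run (one-epoch cap, logging, eval and saving every 25 steps) whose learning rate decays linearly to zero while eval_loss flattens out around 1.73. A small sketch for inspecting such a log is below; it assumes `checkpoint-500/trainer_state.json` is readable from the repository root and that eval entries can be matched to training entries by step, as in the log shown.

```python
# Minimal sketch (assumption: run from the repository root) for inspecting the
# training log in a checkpoint's trainer_state.json: separate training-loss
# entries from eval entries in log_history and report the best eval_loss.
import json
from pathlib import Path

state = json.loads(Path("checkpoint-500/trainer_state.json").read_text())

train_log = [e for e in state["log_history"] if "loss" in e and "eval_loss" not in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

# Learning rate is only logged on training entries; join them to eval entries by step.
lr_by_step = {e["step"]: e["learning_rate"] for e in train_log}

for e in eval_log:
    lr = lr_by_step.get(e["step"], float("nan"))
    print(f"step {e['step']:>3}  eval_loss {e['eval_loss']:.4f}  lr {lr:.2e}")

best = min(eval_log, key=lambda e: e["eval_loss"])
print(f"best eval_loss {best['eval_loss']:.4f} at step {best['step']}")
```

On the checkpoint-500 log reproduced above, this would report the lowest eval_loss (about 1.7327) at step 450, slightly before the final step.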