htahir1 committed
Commit fc4465d
1 Parent(s): 05a3feb

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. README.md +202 -1
  2. adapter_config.json +29 -0
  3. adapter_model.safetensors +3 -0
  4. checkpoint-100/README.md +204 -0
  5. checkpoint-100/adapter_config.json +29 -0
  6. checkpoint-100/adapter_model.safetensors +3 -0
  7. checkpoint-100/optimizer.pt +3 -0
  8. checkpoint-100/rng_state.pth +3 -0
  9. checkpoint-100/scheduler.pt +3 -0
  10. checkpoint-100/trainer_state.json +53 -0
  11. checkpoint-100/training_args.bin +3 -0
  12. checkpoint-1000/README.md +204 -0
  13. checkpoint-1000/adapter_config.json +29 -0
  14. checkpoint-1000/adapter_model.safetensors +3 -0
  15. checkpoint-1000/optimizer.pt +3 -0
  16. checkpoint-1000/rng_state.pth +3 -0
  17. checkpoint-1000/scheduler.pt +3 -0
  18. checkpoint-1000/trainer_state.json +341 -0
  19. checkpoint-1000/training_args.bin +3 -0
  20. checkpoint-200/README.md +204 -0
  21. checkpoint-200/adapter_config.json +29 -0
  22. checkpoint-200/adapter_model.safetensors +3 -0
  23. checkpoint-200/optimizer.pt +3 -0
  24. checkpoint-200/rng_state.pth +3 -0
  25. checkpoint-200/scheduler.pt +3 -0
  26. checkpoint-200/trainer_state.json +85 -0
  27. checkpoint-200/training_args.bin +3 -0
  28. checkpoint-300/README.md +204 -0
  29. checkpoint-300/adapter_config.json +29 -0
  30. checkpoint-300/adapter_model.safetensors +3 -0
  31. checkpoint-300/optimizer.pt +3 -0
  32. checkpoint-300/rng_state.pth +3 -0
  33. checkpoint-300/scheduler.pt +3 -0
  34. checkpoint-300/trainer_state.json +117 -0
  35. checkpoint-300/training_args.bin +3 -0
  36. checkpoint-400/README.md +204 -0
  37. checkpoint-400/adapter_config.json +29 -0
  38. checkpoint-400/adapter_model.safetensors +3 -0
  39. checkpoint-400/optimizer.pt +3 -0
  40. checkpoint-400/rng_state.pth +3 -0
  41. checkpoint-400/scheduler.pt +3 -0
  42. checkpoint-400/trainer_state.json +149 -0
  43. checkpoint-400/training_args.bin +3 -0
  44. checkpoint-500/README.md +204 -0
  45. checkpoint-500/adapter_config.json +29 -0
  46. checkpoint-500/adapter_model.safetensors +3 -0
  47. checkpoint-500/optimizer.pt +3 -0
  48. checkpoint-500/rng_state.pth +3 -0
  49. checkpoint-500/scheduler.pt +3 -0
  50. checkpoint-500/trainer_state.json +181 -0
README.md CHANGED
@@ -1,3 +1,204 @@
  ---
- license: bigcode-openrail-m
+ library_name: peft
+ base_model: bigcode/starcoder
  ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases, and limitations of the model. More information is needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about, as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!-- fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here. -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly. -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and BibTeX information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+
+
+ ### Framework versions
+
+ - PEFT 0.7.2.dev0
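The card metadata (`library_name: peft`, `base_model: bigcode/starcoder`, PEFT 0.7.2.dev0) marks this repository as a LoRA adapter rather than a full model. A minimal loading sketch, assuming transformers and PEFT are installed; `ADAPTER_ID` is a placeholder, since the repository id itself is not shown in this view:

```python
# Minimal sketch: apply the uploaded LoRA adapter to the StarCoder base model.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE_ID = "bigcode/starcoder"
ADAPTER_ID = "htahir1/<this-repo>"  # placeholder: the actual repo id is not in this diff

tokenizer = AutoTokenizer.from_pretrained(BASE_ID)
base = AutoModelForCausalLM.from_pretrained(
    BASE_ID, torch_dtype=torch.bfloat16, device_map="auto"
)
model = PeftModel.from_pretrained(base, ADAPTER_ID)  # loads adapter_model.safetensors

inputs = tokenizer("def fibonacci(n):", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```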
adapter_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "bigcode/starcoder",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "c_fc",
+     "c_proj",
+     "c_attn",
+     "q_attn"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_rslora": false
+ }
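The config pins the LoRA recipe: rank-8 adapters with `lora_alpha` 32 (an effective scaling of alpha/r = 4) and 10% dropout, injected into StarCoder's fused attention projections (`c_attn`, `q_attn`) and MLP projections (`c_fc`, `c_proj`). For reference, a sketch of the equivalent `peft.LoraConfig`; the JSON keys above are essentially the serialized form of these constructor arguments:

```python
# Sketch: the adapter_config.json above, expressed as a LoraConfig.
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,                       # LoRA rank
    lora_alpha=32,             # scaling factor: alpha / r = 4.0
    lora_dropout=0.1,
    bias="none",
    task_type="CAUSAL_LM",
    # StarCoder (GPT-BigCode) fused projection layers to adapt:
    target_modules=["c_fc", "c_proj", "c_attn", "q_attn"],
)
```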
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:495e43d13098fd20e268cc0b7e29b7ce202bd78c87b46b411c81898af04d9b90
+ size 55255584
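Every binary artifact in this commit appears only as a Git LFS pointer: the repository tracks a three-line stanza (spec version, SHA-256 `oid`, byte `size`) while the payload, here a roughly 55 MB adapter, lives in LFS storage. A sketch of fetching one such file with `huggingface_hub`, which resolves the pointer transparently; the repo id is again a placeholder:

```python
# Sketch: download one LFS-backed file from the Hub.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="htahir1/<this-repo>",         # placeholder repo id
    filename="adapter_model.safetensors",  # ~55 MB per the pointer's size field
)
print(local_path)  # local cache path to the resolved binary
```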
checkpoint-100/README.md ADDED
@@ -0,0 +1,204 @@
[204 identical lines hidden: same model card template as README.md above]
checkpoint-100/adapter_config.json ADDED
@@ -0,0 +1,29 @@
[29 identical lines hidden: same adapter_config.json as above]
checkpoint-100/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb4472b9d8d18b86805c351acc203625198bbf35920baa841afdc6ee12f5240f
+ size 55255584
checkpoint-100/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82c7ac539ee39d3cb5e740b4054e9ef614187aa4557fff8989ac9bcfc8006a47
+ size 110696954
checkpoint-100/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f903165bb36368f47dd9f2d97c529373babf7977e621de0dc0c839044562d263
+ size 14180
checkpoint-100/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf139b2cd869934a6a802450d748b65f070f2fe1250b16bce2934376e88f03de
+ size 1064
checkpoint-100/trainer_state.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.1,
+   "eval_steps": 100,
+   "global_step": 100,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.03,
+       "learning_rate": 0.0001666666666666667,
+       "loss": 0.8745,
+       "step": 25
+     },
+     {
+       "epoch": 0.05,
+       "learning_rate": 0.00019979028262377118,
+       "loss": 0.8093,
+       "step": 50
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 0.00019893981312363562,
+       "loss": 0.7357,
+       "step": 75
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 0.00019744105246469263,
+       "loss": 0.7535,
+       "step": 100
+     },
+     {
+       "epoch": 0.1,
+       "eval_loss": 0.4003306031227112,
+       "eval_runtime": 1.7839,
+       "eval_samples_per_second": 2.242,
+       "eval_steps_per_second": 0.561,
+       "step": 100
+     }
+   ],
+   "logging_steps": 25,
+   "max_steps": 1000,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 9223372036854775807,
+   "save_steps": 100,
+   "total_flos": 1.493507298557952e+17,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
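`trainer_state.json` is written by the transformers `Trainer`; `log_history` interleaves training records (`loss` and `learning_rate` every `logging_steps=25`) with evaluation records (`eval_loss` every `eval_steps=100`). The huge `num_train_epochs` value (int64 max) is what the Trainer records when training is bounded by `max_steps` over a streamed dataset rather than by epochs. A small sketch for separating the two loss curves from a local copy of the file:

```python
# Sketch: split log_history into training and evaluation loss curves.
import json

with open("checkpoint-100/trainer_state.json") as f:
    state = json.load(f)

train_loss = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_loss = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(train_loss)  # [(25, 0.8745), (50, 0.8093), (75, 0.7357), (100, 0.7535)]
print(eval_loss)   # [(100, 0.4003306031227112)]
```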
checkpoint-100/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5fcc93ccb3f87a3311bb3baa8290949012608d857aa7e1f8e40c50e3c4f99548
+ size 4792
checkpoint-1000/README.md ADDED
@@ -0,0 +1,204 @@
[204 identical lines hidden: same model card template as README.md above]
checkpoint-1000/adapter_config.json ADDED
@@ -0,0 +1,29 @@
[29 identical lines hidden: same adapter_config.json as above]
checkpoint-1000/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:495e43d13098fd20e268cc0b7e29b7ce202bd78c87b46b411c81898af04d9b90
+ size 55255584
checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbc36e1c878784099b46f5b9a718b335165f7613c64f61d059016e0cedfaa033
+ size 110696954
checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1dc61da6425213a4ed0c6718da9534c16f880b198a814db7fecbd699176650b
+ size 14244
checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7b5bf190dc871967c45091d9f1ab233b2d2ed62baca21fee5dfedb5718ffa5d
+ size 1064
checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,341 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.0,
+   "eval_steps": 100,
+   "global_step": 1000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.03,
+       "learning_rate": 0.0001666666666666667,
+       "loss": 0.8745,
+       "step": 25
+     },
+     {
+       "epoch": 0.05,
+       "learning_rate": 0.00019979028262377118,
+       "loss": 0.8093,
+       "step": 50
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 0.00019893981312363562,
+       "loss": 0.7357,
+       "step": 75
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 0.00019744105246469263,
+       "loss": 0.7535,
+       "step": 100
+     },
+     {
+       "epoch": 0.1,
+       "eval_loss": 0.4003306031227112,
+       "eval_runtime": 1.7839,
+       "eval_samples_per_second": 2.242,
+       "eval_steps_per_second": 0.561,
+       "step": 100
+     },
+     {
+       "epoch": 0.12,
+       "learning_rate": 0.0001953038210948861,
+       "loss": 0.7249,
+       "step": 125
+     },
+     {
+       "epoch": 0.15,
+       "learning_rate": 0.00019254212296427044,
+       "loss": 0.7118,
+       "step": 150
+     },
+     {
+       "epoch": 0.17,
+       "learning_rate": 0.00018917405376582145,
+       "loss": 0.7467,
+       "step": 175
+     },
+     {
+       "epoch": 0.2,
+       "learning_rate": 0.00018522168236559695,
+       "loss": 0.6714,
+       "step": 200
+     },
+     {
+       "epoch": 0.2,
+       "eval_loss": 0.3684937059879303,
+       "eval_runtime": 1.7853,
+       "eval_samples_per_second": 2.24,
+       "eval_steps_per_second": 0.56,
+       "step": 200
+     },
+     {
+       "epoch": 0.23,
+       "learning_rate": 0.00018071090619916093,
+       "loss": 0.654,
+       "step": 225
+     },
+     {
+       "epoch": 0.25,
+       "learning_rate": 0.00017567128158176953,
+       "loss": 0.6392,
+       "step": 250
+     },
+     {
+       "epoch": 0.28,
+       "learning_rate": 0.00017013583004418993,
+       "loss": 0.5745,
+       "step": 275
+     },
+     {
+       "epoch": 0.3,
+       "learning_rate": 0.000164140821963114,
+       "loss": 0.5364,
+       "step": 300
+     },
+     {
+       "epoch": 0.3,
+       "eval_loss": 0.3665352761745453,
+       "eval_runtime": 1.7852,
+       "eval_samples_per_second": 2.241,
+       "eval_steps_per_second": 0.56,
+       "step": 300
+     },
+     {
+       "epoch": 0.33,
+       "learning_rate": 0.00015772553890390197,
+       "loss": 0.5693,
+       "step": 325
+     },
+     {
+       "epoch": 0.35,
+       "learning_rate": 0.00015093201623287631,
+       "loss": 0.563,
+       "step": 350
+     },
+     {
+       "epoch": 0.38,
+       "learning_rate": 0.00014380476768566824,
+       "loss": 0.5478,
+       "step": 375
+     },
+     {
+       "epoch": 0.4,
+       "learning_rate": 0.00013639049369634876,
+       "loss": 0.5763,
+       "step": 400
+     },
+     {
+       "epoch": 0.4,
+       "eval_loss": 0.3363753855228424,
+       "eval_runtime": 1.7851,
+       "eval_samples_per_second": 2.241,
+       "eval_steps_per_second": 0.56,
+       "step": 400
+     },
+     {
+       "epoch": 0.42,
+       "learning_rate": 0.00012873777539848283,
+       "loss": 0.4891,
+       "step": 425
+     },
+     {
+       "epoch": 0.45,
+       "learning_rate": 0.00012089675630312754,
+       "loss": 0.5331,
+       "step": 450
+     },
+     {
+       "epoch": 0.47,
+       "learning_rate": 0.00011291881373954065,
+       "loss": 0.5679,
+       "step": 475
+     },
+     {
+       "epoch": 0.5,
+       "learning_rate": 0.00010485622221144484,
+       "loss": 0.5982,
+       "step": 500
+     },
+     {
+       "epoch": 0.5,
+       "eval_loss": 0.3186224400997162,
+       "eval_runtime": 1.7828,
+       "eval_samples_per_second": 2.244,
+       "eval_steps_per_second": 0.561,
+       "step": 500
+     },
+     {
+       "epoch": 0.53,
+       "learning_rate": 9.676181087466444e-05,
+       "loss": 0.5467,
+       "step": 525
+     },
+     {
+       "epoch": 0.55,
+       "learning_rate": 8.868861738047158e-05,
+       "loss": 0.5706,
+       "step": 550
+     },
+     {
+       "epoch": 0.57,
+       "learning_rate": 8.068954035279121e-05,
+       "loss": 0.504,
+       "step": 575
+     },
+     {
+       "epoch": 0.6,
+       "learning_rate": 7.281699277636572e-05,
+       "loss": 0.5267,
+       "step": 600
+     },
+     {
+       "epoch": 0.6,
+       "eval_loss": 0.32175499200820923,
+       "eval_runtime": 1.7846,
+       "eval_samples_per_second": 2.241,
+       "eval_steps_per_second": 0.56,
+       "step": 600
+     },
+     {
+       "epoch": 0.62,
+       "learning_rate": 6.512255856701177e-05,
+       "loss": 0.5414,
+       "step": 625
+     },
+     {
+       "epoch": 0.65,
+       "learning_rate": 5.765665457425102e-05,
+       "loss": 0.5412,
+       "step": 650
+     },
+     {
+       "epoch": 0.68,
+       "learning_rate": 5.0468200231001286e-05,
+       "loss": 0.4611,
+       "step": 675
+     },
+     {
+       "epoch": 0.7,
+       "learning_rate": 4.360429701490934e-05,
+       "loss": 0.5073,
+       "step": 700
+     },
+     {
+       "epoch": 0.7,
+       "eval_loss": 0.31275978684425354,
+       "eval_runtime": 1.7833,
+       "eval_samples_per_second": 2.243,
+       "eval_steps_per_second": 0.561,
+       "step": 700
+     },
+     {
+       "epoch": 0.72,
+       "learning_rate": 3.710991982161555e-05,
+       "loss": 0.4778,
+       "step": 725
+     },
+     {
+       "epoch": 0.75,
+       "learning_rate": 3.102762227218957e-05,
+       "loss": 0.5454,
+       "step": 750
+     },
+     {
+       "epoch": 0.78,
+       "learning_rate": 2.5397257885675397e-05,
+       "loss": 0.5612,
+       "step": 775
+     },
+     {
+       "epoch": 0.8,
+       "learning_rate": 2.025571894372794e-05,
+       "loss": 0.4983,
+       "step": 800
+     },
+     {
+       "epoch": 0.8,
+       "eval_loss": 0.31457942724227905,
+       "eval_runtime": 1.7821,
+       "eval_samples_per_second": 2.245,
+       "eval_steps_per_second": 0.561,
+       "step": 800
+     },
+     {
+       "epoch": 0.82,
+       "learning_rate": 1.563669475839956e-05,
+       "loss": 0.4941,
+       "step": 825
+     },
+     {
+       "epoch": 0.85,
+       "learning_rate": 1.1570450926997655e-05,
+       "loss": 0.4926,
+       "step": 850
+     },
+     {
+       "epoch": 0.88,
+       "learning_rate": 8.083631020418791e-06,
+       "loss": 0.5094,
+       "step": 875
+     },
+     {
+       "epoch": 0.9,
+       "learning_rate": 5.199082004372957e-06,
+       "loss": 0.5116,
+       "step": 900
+     },
+     {
+       "epoch": 0.9,
+       "eval_loss": 0.31357938051223755,
+       "eval_runtime": 1.7811,
+       "eval_samples_per_second": 2.246,
+       "eval_steps_per_second": 0.561,
+       "step": 900
+     },
+     {
+       "epoch": 0.93,
+       "learning_rate": 2.9357045374040825e-06,
+       "loss": 0.4244,
+       "step": 925
+     },
+     {
+       "epoch": 0.95,
+       "learning_rate": 1.30832912661093e-06,
+       "loss": 0.4511,
+       "step": 950
+     },
+     {
+       "epoch": 0.97,
+       "learning_rate": 3.2761895254306287e-07,
+       "loss": 0.4179,
+       "step": 975
+     },
+     {
+       "epoch": 1.0,
+       "learning_rate": 0.0,
+       "loss": 0.4662,
+       "step": 1000
+     },
+     {
+       "epoch": 1.0,
+       "eval_loss": 0.3136279284954071,
+       "eval_runtime": 1.7836,
+       "eval_samples_per_second": 2.243,
+       "eval_steps_per_second": 0.561,
+       "step": 1000
+     }
+   ],
+   "logging_steps": 25,
+   "max_steps": 1000,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 9223372036854775807,
+   "save_steps": 100,
+   "total_flos": 1.493507298557952e+18,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
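The logged learning rates trace linear warmup to a peak of 2e-4 followed by cosine decay to zero at step 1000. The warmup length is not stored in this file, but 30 steps reproduces every logged value (for example, 2e-4 · 25/30 ≈ 1.667e-4 at step 25), so treat the constants in this sketch as inferred rather than documented:

```python
# Sketch: reconstruct the LR schedule implied by log_history.
# PEAK_LR and WARMUP are inferred from the logged values, not read from a config.
import math

PEAK_LR, WARMUP, MAX_STEPS = 2e-4, 30, 1000

def lr_at(step: int) -> float:
    if step < WARMUP:
        return PEAK_LR * step / WARMUP  # linear warmup
    progress = (step - WARMUP) / (MAX_STEPS - WARMUP)
    return PEAK_LR * 0.5 * (1 + math.cos(math.pi * progress))  # cosine decay

print(f"{lr_at(25):.6e}")    # ~1.666667e-04, matches log_history step 25
print(f"{lr_at(50):.6e}")    # ~1.997903e-04, matches step 50
print(f"{lr_at(1000):.6e}")  # 0.000000e+00, matches step 1000
```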
checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5fcc93ccb3f87a3311bb3baa8290949012608d857aa7e1f8e40c50e3c4f99548
+ size 4792
checkpoint-200/README.md ADDED
@@ -0,0 +1,204 @@
[204 identical lines hidden: same model card template as README.md above]
checkpoint-200/adapter_config.json ADDED
@@ -0,0 +1,29 @@
[29 identical lines hidden: same adapter_config.json as above]
checkpoint-200/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f05f803645127df3c355c2f4280d7691d4ce9deb1fbc56f61247b55c7e5b719
+ size 55255584
checkpoint-200/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee69d8793a2d6932e774ee12256f5321b4b714280b2d4d70141459f210dccc26
+ size 110696954
checkpoint-200/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:332b5f59525e9daeab689d4a9878243dd81ca2310bbe6cd23fa9e3060f182362
+ size 14244
checkpoint-200/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0a17e229e361808d0d69c75ecdc1ef9a97dcbcbf9ffe72c26d29d2aceaec1f9
+ size 1064
checkpoint-200/trainer_state.json ADDED
@@ -0,0 +1,85 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.2,
+   "eval_steps": 100,
+   "global_step": 200,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.03,
+       "learning_rate": 0.0001666666666666667,
+       "loss": 0.8745,
+       "step": 25
+     },
+     {
+       "epoch": 0.05,
+       "learning_rate": 0.00019979028262377118,
+       "loss": 0.8093,
+       "step": 50
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 0.00019893981312363562,
+       "loss": 0.7357,
+       "step": 75
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 0.00019744105246469263,
+       "loss": 0.7535,
+       "step": 100
+     },
+     {
+       "epoch": 0.1,
+       "eval_loss": 0.4003306031227112,
+       "eval_runtime": 1.7839,
+       "eval_samples_per_second": 2.242,
+       "eval_steps_per_second": 0.561,
+       "step": 100
+     },
+     {
+       "epoch": 0.12,
+       "learning_rate": 0.0001953038210948861,
+       "loss": 0.7249,
+       "step": 125
+     },
+     {
+       "epoch": 0.15,
+       "learning_rate": 0.00019254212296427044,
+       "loss": 0.7118,
+       "step": 150
+     },
+     {
+       "epoch": 0.17,
+       "learning_rate": 0.00018917405376582145,
+       "loss": 0.7467,
+       "step": 175
+     },
+     {
+       "epoch": 0.2,
+       "learning_rate": 0.00018522168236559695,
+       "loss": 0.6714,
+       "step": 200
+     },
+     {
+       "epoch": 0.2,
+       "eval_loss": 0.3684937059879303,
+       "eval_runtime": 1.7853,
+       "eval_samples_per_second": 2.24,
+       "eval_steps_per_second": 0.56,
+       "step": 200
+     }
+   ],
+   "logging_steps": 25,
+   "max_steps": 1000,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 9223372036854775807,
+   "save_steps": 100,
+   "total_flos": 2.987014597115904e+17,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-200/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5fcc93ccb3f87a3311bb3baa8290949012608d857aa7e1f8e40c50e3c4f99548
+ size 4792
checkpoint-300/README.md ADDED
@@ -0,0 +1,204 @@
[204 identical lines hidden: same model card template as README.md above]
checkpoint-300/adapter_config.json ADDED
@@ -0,0 +1,29 @@
[29 identical lines hidden: same adapter_config.json as above]
checkpoint-300/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2a461b4f667ec91e2a4a0f8ebb1a0beac3e7b119c3c7bd46e011e114d66acbe
+ size 55255584
checkpoint-300/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:490271b6f994cc5d0c62ef67f2239acfb833aef87a116a9b9be155f14dfd79f2
+ size 110696954
checkpoint-300/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ebe5ecb60a57db217626fc48593a1343b259e1412ab2cc0ce66d958d2f58062
+ size 14180
checkpoint-300/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:62db676ea589f2e897f3ed22ee3133a534ed12d0dd978bfaec8bc59572ea976b
+ size 1064
checkpoint-300/trainer_state.json ADDED
@@ -0,0 +1,117 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.3,
+ "eval_steps": 100,
+ "global_step": 300,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.03,
+ "learning_rate": 0.0001666666666666667,
+ "loss": 0.8745,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 0.00019979028262377118,
+ "loss": 0.8093,
+ "step": 50
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 0.00019893981312363562,
+ "loss": 0.7357,
+ "step": 75
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 0.00019744105246469263,
+ "loss": 0.7535,
+ "step": 100
+ },
+ {
+ "epoch": 0.1,
+ "eval_loss": 0.4003306031227112,
+ "eval_runtime": 1.7839,
+ "eval_samples_per_second": 2.242,
+ "eval_steps_per_second": 0.561,
+ "step": 100
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 0.0001953038210948861,
+ "loss": 0.7249,
+ "step": 125
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 0.00019254212296427044,
+ "loss": 0.7118,
+ "step": 150
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 0.00018917405376582145,
+ "loss": 0.7467,
+ "step": 175
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 0.00018522168236559695,
+ "loss": 0.6714,
+ "step": 200
+ },
+ {
+ "epoch": 0.2,
+ "eval_loss": 0.3684937059879303,
+ "eval_runtime": 1.7853,
+ "eval_samples_per_second": 2.24,
+ "eval_steps_per_second": 0.56,
+ "step": 200
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 0.00018071090619916093,
+ "loss": 0.654,
+ "step": 225
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 0.00017567128158176953,
+ "loss": 0.6392,
+ "step": 250
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 0.00017013583004418993,
+ "loss": 0.5745,
+ "step": 275
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 0.000164140821963114,
+ "loss": 0.5364,
+ "step": 300
+ },
+ {
+ "epoch": 0.3,
+ "eval_loss": 0.3665352761745453,
+ "eval_runtime": 1.7852,
+ "eval_samples_per_second": 2.241,
+ "eval_steps_per_second": 0.56,
+ "step": 300
+ }
+ ],
+ "logging_steps": 25,
+ "max_steps": 1000,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 9223372036854775807,
+ "save_steps": 100,
+ "total_flos": 4.480521895673856e+17,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+ }
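`trainer_state.json` is the Hugging Face `Trainer`'s running log: `log_history` interleaves training entries (`loss`, `learning_rate`) with evaluation entries (`eval_loss` and throughput stats) at each `eval_steps` boundary. A stdlib-only sketch splitting the two, using only the keys visible in the file above:

```python
# Sketch: pull the train and eval curves out of a checkpoint's trainer_state.json.
import json

with open("checkpoint-300/trainer_state.json") as f:
    state = json.load(f)

train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print("global step:", state["global_step"], "of", state["max_steps"])
print("last train loss:", train[-1])  # (300, 0.5364) for the file above
print("eval losses:", evals)          # [(100, 0.4003...), (200, 0.3685...), (300, 0.3665...)]
```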
checkpoint-300/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5fcc93ccb3f87a3311bb3baa8290949012608d857aa7e1f8e40c50e3c4f99548
+ size 4792
checkpoint-400/README.md ADDED
@@ -0,0 +1,204 @@
+ ---
+ library_name: peft
+ base_model: bigcode/starcoder
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+
+
+ ### Framework versions
+
+ - PEFT 0.7.2.dev0
checkpoint-400/adapter_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "bigcode/starcoder",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_dropout": 0.1,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "c_fc",
+ "c_proj",
+ "c_attn",
+ "q_attn"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_rslora": false
+ }
checkpoint-400/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:edaca769743a175eff4e0ef6bc5d4e4446f21774430a20fc43aa3c3670419ba9
+ size 55255584
checkpoint-400/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e62dc266a27e92f541dc5d52ccba7508e44b1ad73a79786b9a027debc324590
+ size 110696954
checkpoint-400/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71832fb3990c1059e14ec9109d9bf125f682c118937e9e5b1a3310b3e8be05ec
+ size 14244
checkpoint-400/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73179501dc4bcc1b0d3ff711880af909a9b84bb7d003a900c122d08331d45bfb
+ size 1064
checkpoint-400/trainer_state.json ADDED
@@ -0,0 +1,149 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.4,
+ "eval_steps": 100,
+ "global_step": 400,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.03,
+ "learning_rate": 0.0001666666666666667,
+ "loss": 0.8745,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 0.00019979028262377118,
+ "loss": 0.8093,
+ "step": 50
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 0.00019893981312363562,
+ "loss": 0.7357,
+ "step": 75
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 0.00019744105246469263,
+ "loss": 0.7535,
+ "step": 100
+ },
+ {
+ "epoch": 0.1,
+ "eval_loss": 0.4003306031227112,
+ "eval_runtime": 1.7839,
+ "eval_samples_per_second": 2.242,
+ "eval_steps_per_second": 0.561,
+ "step": 100
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 0.0001953038210948861,
+ "loss": 0.7249,
+ "step": 125
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 0.00019254212296427044,
+ "loss": 0.7118,
+ "step": 150
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 0.00018917405376582145,
+ "loss": 0.7467,
+ "step": 175
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 0.00018522168236559695,
+ "loss": 0.6714,
+ "step": 200
+ },
+ {
+ "epoch": 0.2,
+ "eval_loss": 0.3684937059879303,
+ "eval_runtime": 1.7853,
+ "eval_samples_per_second": 2.24,
+ "eval_steps_per_second": 0.56,
+ "step": 200
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 0.00018071090619916093,
+ "loss": 0.654,
+ "step": 225
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 0.00017567128158176953,
+ "loss": 0.6392,
+ "step": 250
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 0.00017013583004418993,
+ "loss": 0.5745,
+ "step": 275
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 0.000164140821963114,
+ "loss": 0.5364,
+ "step": 300
+ },
+ {
+ "epoch": 0.3,
+ "eval_loss": 0.3665352761745453,
+ "eval_runtime": 1.7852,
+ "eval_samples_per_second": 2.241,
+ "eval_steps_per_second": 0.56,
+ "step": 300
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 0.00015772553890390197,
+ "loss": 0.5693,
+ "step": 325
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 0.00015093201623287631,
+ "loss": 0.563,
+ "step": 350
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 0.00014380476768566824,
+ "loss": 0.5478,
+ "step": 375
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 0.00013639049369634876,
+ "loss": 0.5763,
+ "step": 400
+ },
+ {
+ "epoch": 0.4,
+ "eval_loss": 0.3363753855228424,
+ "eval_runtime": 1.7851,
+ "eval_samples_per_second": 2.241,
+ "eval_steps_per_second": 0.56,
+ "step": 400
+ }
+ ],
+ "logging_steps": 25,
+ "max_steps": 1000,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 9223372036854775807,
+ "save_steps": 100,
+ "total_flos": 5.974029194231808e+17,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+ }
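Because each checkpoint carries the full `log_history` up to its own step, any later state subsumes the earlier ones; here eval_loss is still improving (0.4003 → 0.3685 → 0.3665 → 0.3364 at steps 100–400). A sketch for picking the saved checkpoint with the lowest eval_loss; it assumes the `checkpoint-<step>/` layout of this upload and a local copy of the repo:

```python
# Sketch: choose the best checkpoint by its most recent eval_loss.
import json
from pathlib import Path

def best_checkpoint(run_dir: str) -> tuple[str, float]:
    best = ("", float("inf"))
    for ckpt in sorted(Path(run_dir).glob("checkpoint-*")):
        state = json.loads((ckpt / "trainer_state.json").read_text())
        evals = [e["eval_loss"] for e in state["log_history"] if "eval_loss" in e]
        if evals and evals[-1] < best[1]:
            best = (ckpt.name, evals[-1])
    return best

# Among the states shown in this diff, checkpoint-500 (0.3186...) is lowest,
# though later checkpoints in the repo may improve on it.
print(best_checkpoint("."))
```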
checkpoint-400/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5fcc93ccb3f87a3311bb3baa8290949012608d857aa7e1f8e40c50e3c4f99548
+ size 4792
checkpoint-500/README.md ADDED
@@ -0,0 +1,204 @@
+ ---
+ library_name: peft
+ base_model: bigcode/starcoder
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+
+
+ ### Framework versions
+
+ - PEFT 0.7.2.dev0
checkpoint-500/adapter_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "bigcode/starcoder",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_dropout": 0.1,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "c_fc",
+ "c_proj",
+ "c_attn",
+ "q_attn"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_rslora": false
+ }
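The same adapter_config.json is written into every checkpoint, and it corresponds one-to-one to a `peft.LoraConfig`. A sketch of a config that round-trips to the JSON above; this is reconstructed from the file, not copied from the original training script:

```python
# Sketch: a LoraConfig matching the adapter_config.json shown above.
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,
    lora_alpha=32,
    lora_dropout=0.1,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["c_fc", "c_proj", "c_attn", "q_attn"],
)

# lora_config.save_pretrained("out/") would emit an adapter_config.json with
# these fields (remaining keys take their library defaults).
```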
checkpoint-500/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3da466246316f1d6c831d2bca7fe024dc139f59f71a0bb8a12a79bf885467db4
+ size 55255584
checkpoint-500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d1dd841350fff6cb5403d07db622784417e503895aa7c470dc0bb46de7e37d6
+ size 110696954
checkpoint-500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f08f4c8427f0bcf80eabd18dc74ad53a2ae6e85f6226bc2a3da12c0c80968b99
+ size 14244
checkpoint-500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f51b657e3d38a2589f1fc9606eb9bdf1d6b09dd6934a23956cba0003ba32ad8
+ size 1064
checkpoint-500/trainer_state.json ADDED
@@ -0,0 +1,181 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.5,
+ "eval_steps": 100,
+ "global_step": 500,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.03,
+ "learning_rate": 0.0001666666666666667,
+ "loss": 0.8745,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 0.00019979028262377118,
+ "loss": 0.8093,
+ "step": 50
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 0.00019893981312363562,
+ "loss": 0.7357,
+ "step": 75
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 0.00019744105246469263,
+ "loss": 0.7535,
+ "step": 100
+ },
+ {
+ "epoch": 0.1,
+ "eval_loss": 0.4003306031227112,
+ "eval_runtime": 1.7839,
+ "eval_samples_per_second": 2.242,
+ "eval_steps_per_second": 0.561,
+ "step": 100
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 0.0001953038210948861,
+ "loss": 0.7249,
+ "step": 125
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 0.00019254212296427044,
+ "loss": 0.7118,
+ "step": 150
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 0.00018917405376582145,
+ "loss": 0.7467,
+ "step": 175
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 0.00018522168236559695,
+ "loss": 0.6714,
+ "step": 200
+ },
+ {
+ "epoch": 0.2,
+ "eval_loss": 0.3684937059879303,
+ "eval_runtime": 1.7853,
+ "eval_samples_per_second": 2.24,
+ "eval_steps_per_second": 0.56,
+ "step": 200
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 0.00018071090619916093,
+ "loss": 0.654,
+ "step": 225
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 0.00017567128158176953,
+ "loss": 0.6392,
+ "step": 250
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 0.00017013583004418993,
+ "loss": 0.5745,
+ "step": 275
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 0.000164140821963114,
+ "loss": 0.5364,
+ "step": 300
+ },
+ {
+ "epoch": 0.3,
+ "eval_loss": 0.3665352761745453,
+ "eval_runtime": 1.7852,
+ "eval_samples_per_second": 2.241,
+ "eval_steps_per_second": 0.56,
+ "step": 300
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 0.00015772553890390197,
+ "loss": 0.5693,
+ "step": 325
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 0.00015093201623287631,
+ "loss": 0.563,
+ "step": 350
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 0.00014380476768566824,
+ "loss": 0.5478,
+ "step": 375
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 0.00013639049369634876,
+ "loss": 0.5763,
+ "step": 400
+ },
+ {
+ "epoch": 0.4,
+ "eval_loss": 0.3363753855228424,
+ "eval_runtime": 1.7851,
+ "eval_samples_per_second": 2.241,
+ "eval_steps_per_second": 0.56,
+ "step": 400
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 0.00012873777539848283,
+ "loss": 0.4891,
+ "step": 425
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 0.00012089675630312754,
+ "loss": 0.5331,
+ "step": 450
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 0.00011291881373954065,
+ "loss": 0.5679,
+ "step": 475
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 0.00010485622221144484,
+ "loss": 0.5982,
+ "step": 500
+ },
+ {
+ "epoch": 0.5,
+ "eval_loss": 0.3186224400997162,
+ "eval_runtime": 1.7828,
+ "eval_samples_per_second": 2.244,
+ "eval_steps_per_second": 0.561,
+ "step": 500
+ }
+ ],
+ "logging_steps": 25,
+ "max_steps": 1000,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 9223372036854775807,
+ "save_steps": 100,
+ "total_flos": 7.46753649278976e+17,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+ }
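The logged learning rates are consistent with linear warmup followed by cosine decay: a peak of 2e-4, roughly 30 warmup steps, and a 1000-step horizon reproduce them to the logged precision (step 25 → 1.667e-4 during warmup; step 500 → 1.0486e-4 mid-decay). A sketch checking that reading; the peak/warmup/total values are inferred from `log_history`, not read from `training_args.bin`:

```python
# Sketch: reproduce the logged LR schedule. peak, warmup and total are
# inferred from trainer_state.json values, not taken from the training args.
import math

def lr_at(step: int, peak: float = 2e-4, warmup: int = 30, total: int = 1000) -> float:
    if step < warmup:
        return peak * step / warmup  # linear warmup
    progress = (step - warmup) / (total - warmup)
    return peak * 0.5 * (1.0 + math.cos(math.pi * progress))  # cosine decay

print(lr_at(25))   # ~0.00016666..., matches the step-25 log entry
print(lr_at(500))  # ~0.00010485..., matches the step-500 log entry
```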