FINGU-AI committed
Commit: edb8dc4
1 Parent(s): 0765c4a

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,202 @@
- ---
- license: mit
- ---
+ ---
+ base_model: Qwen/Qwen2.5-32B-Instruct
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.13.3.dev0
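
The "How to Get Started with the Model" section above is still a placeholder. A minimal sketch of what it would typically contain for this kind of PEFT LoRA adapter on Qwen/Qwen2.5-32B-Instruct is given below; the repository id `FINGU-AI/this-adapter-repo`, the bfloat16 dtype, and the `device_map="auto"` placement are assumptions, not values taken from this commit.

```python
# Minimal sketch (not part of the commit): load the base model and attach this LoRA adapter.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "Qwen/Qwen2.5-32B-Instruct"        # from the README front matter / adapter_config.json
adapter_id = "FINGU-AI/this-adapter-repo"    # placeholder: replace with this repository's actual id

tokenizer = AutoTokenizer.from_pretrained(adapter_id)   # the commit ships its own tokenizer files
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)      # applies adapter_model.safetensors on top

messages = [{"role": "user", "content": "Hello!"}]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
output = model.generate(inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```

If the adapter is only needed for inference, `model = model.merge_and_unload()` folds the LoRA weights back into the base model so generation runs without the PEFT wrapper.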
adapter_config.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "Qwen/Qwen2.5-32B-Instruct",
+   "bias": "none",
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_dropout": 0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj",
+     "k_proj",
+     "down_proj",
+     "gate_proj",
+     "o_proj",
+     "up_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
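
For readers reproducing the fine-tune: the file above describes a LoRA of rank 32 with `lora_alpha` 32 and no dropout, applied to every attention and MLP projection (`q_proj`, `k_proj`, `v_proj`, `o_proj`, `gate_proj`, `up_proj`, `down_proj`) of the base model. A hedged sketch of the equivalent `peft.LoraConfig` follows; everything not listed in `adapter_config.json` (dtype, device placement, the training loop itself) is an assumption.

```python
# Sketch only: a LoraConfig mirroring adapter_config.json above.
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

lora_config = LoraConfig(
    r=32,                   # "r": 32
    lora_alpha=32,          # "lora_alpha": 32
    lora_dropout=0.0,       # "lora_dropout": 0
    bias="none",            # "bias": "none"
    task_type="CAUSAL_LM",  # "task_type": "CAUSAL_LM"
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
)

# How the base model was loaded for training is not recorded here; torch_dtype="auto" is an assumption.
base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-32B-Instruct", torch_dtype="auto")
model = get_peft_model(base, lora_config)
model.print_trainable_parameters()
```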
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:010ccb3e39df21ab4f27a8a2fd7bde0e487a4ef8fdd1b9ce5e563ab67ca126f4
+ size 1073863208
added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "</tool_call>": 151658,
+   "<tool_call>": 151657,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab5a53138f1ceed221666e7bf652ee9524302ddf8a576af0b727e25ddfcda1e6
+ size 2148241810
rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d39cd9103dbf8c8cd97b81547e76bda1e399bc302019afd3e728600c27b291c6
+ size 14960
rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:725b628dcb9bde851daa5e74b4c0a3220bd297b9f546d963258de38c2c74a34a
+ size 14960
rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b120970110f283918dd120ebfb8672ff17382fd08cb1da667afb937a9580b07b
+ size 14960
rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e25a037b97322fadf9ebe1fe36261ef9d02727f6e6ca13823f2c3a9cba80f7a
+ size 14960
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94585a59d26998a3050bb6d2aaa1fe47367d332b91284936f8663b19c7b9c5e4
+ size 1064
special_tokens_map.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<|im_end|>"
+ }
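
Two details worth noting from `added_tokens.json` and `special_tokens_map.json`: the ChatML markers `<|im_start|>` / `<|im_end|>` are registered as additional special tokens, and the pad token is set to `<|im_end|>` (id 151645), so padding and end-of-turn share one id. A small sketch for checking this locally; the repository id is a placeholder, and the left-padding choice is a common convention rather than something stated in the commit.

```python
# Sketch: inspect the special-token setup shipped with this commit.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("FINGU-AI/this-adapter-repo")  # placeholder repo id
print(tok.eos_token, tok.pad_token)              # both "<|im_end|>"
print(tok.convert_tokens_to_ids("<|im_end|>"))   # 151645, matching added_tokens.json
tok.padding_side = "left"                        # usual choice for batched generation when pad == eos
```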
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+ size 11421896
tokenizer_config.json ADDED
@@ -0,0 +1,207 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {"content": "<|endoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151644": {"content": "<|im_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151645": {"content": "<|im_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151646": {"content": "<|object_ref_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151647": {"content": "<|object_ref_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151648": {"content": "<|box_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151649": {"content": "<|box_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151650": {"content": "<|quad_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151651": {"content": "<|quad_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151652": {"content": "<|vision_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151653": {"content": "<|vision_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151654": {"content": "<|vision_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151655": {"content": "<|image_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151656": {"content": "<|video_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151657": {"content": "<tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+     "151658": {"content": "</tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+     "151659": {"content": "<|fim_prefix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+     "151660": {"content": "<|fim_middle|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+     "151661": {"content": "<|fim_suffix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+     "151662": {"content": "<|fim_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+     "151663": {"content": "<|repo_name|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+     "151664": {"content": "<|file_sep|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false}
+   },
+   "additional_special_tokens": ["<|im_start|>", "<|im_end|>", "<|object_ref_start|>", "<|object_ref_end|>", "<|box_start|>", "<|box_end|>", "<|quad_start|>", "<|quad_end|>", "<|vision_start|>", "<|vision_end|>", "<|vision_pad|>", "<|image_pad|>", "<|video_pad|>"],
+   "bos_token": null,
+   "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "model_max_length": 131072,
+   "pad_token": "<|im_end|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
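
The `chat_template` above is Qwen's ChatML template: a default system turn ("You are Qwen, created by Alibaba Cloud..."), one `<|im_start|>{role} ... <|im_end|>` block per message, and optional `<tools>` / `<tool_call>` sections for function calling. Below is a short sketch of rendering a conversation with it; the repository id is again a placeholder.

```python
# Sketch: render a prompt with the ChatML chat_template from tokenizer_config.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("FINGU-AI/this-adapter-repo")  # placeholder repo id
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize LoRA in one sentence."},
]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # ends with "<|im_start|>assistant\n", so the model continues the assistant turn
```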
trainer_state.json ADDED
@@ -0,0 +1,625 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 3.146509341199607,
5
+ "eval_steps": 100,
6
+ "global_step": 400,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.03933136676499508,
13
+ "grad_norm": 0.481609046459198,
14
+ "learning_rate": 0.00015,
15
+ "loss": 2.0722,
16
+ "step": 5
17
+ },
18
+ {
19
+ "epoch": 0.07866273352999016,
20
+ "grad_norm": 0.15720224380493164,
21
+ "learning_rate": 0.0003,
22
+ "loss": 1.4825,
23
+ "step": 10
24
+ },
25
+ {
26
+ "epoch": 0.11799410029498525,
27
+ "grad_norm": 0.06716315448284149,
28
+ "learning_rate": 0.00029759999999999997,
29
+ "loss": 1.3333,
30
+ "step": 15
31
+ },
32
+ {
33
+ "epoch": 0.15732546705998032,
34
+ "grad_norm": 0.06133478134870529,
35
+ "learning_rate": 0.00029519999999999997,
36
+ "loss": 1.2341,
37
+ "step": 20
38
+ },
39
+ {
40
+ "epoch": 0.19665683382497542,
41
+ "grad_norm": 0.07264667749404907,
42
+ "learning_rate": 0.00029279999999999996,
43
+ "loss": 1.1756,
44
+ "step": 25
45
+ },
46
+ {
47
+ "epoch": 0.2359882005899705,
48
+ "grad_norm": 0.07928217202425003,
49
+ "learning_rate": 0.00029039999999999996,
50
+ "loss": 1.1197,
51
+ "step": 30
52
+ },
53
+ {
54
+ "epoch": 0.2753195673549656,
55
+ "grad_norm": 0.09420346468687057,
56
+ "learning_rate": 0.00028799999999999995,
57
+ "loss": 1.0834,
58
+ "step": 35
59
+ },
60
+ {
61
+ "epoch": 0.31465093411996065,
62
+ "grad_norm": 0.0862259566783905,
63
+ "learning_rate": 0.00028559999999999995,
64
+ "loss": 1.044,
65
+ "step": 40
66
+ },
67
+ {
68
+ "epoch": 0.35398230088495575,
69
+ "grad_norm": 0.09086894243955612,
70
+ "learning_rate": 0.00028319999999999994,
71
+ "loss": 1.0205,
72
+ "step": 45
73
+ },
74
+ {
75
+ "epoch": 0.39331366764995085,
76
+ "grad_norm": 0.08469890058040619,
77
+ "learning_rate": 0.0002808,
78
+ "loss": 0.9798,
79
+ "step": 50
80
+ },
81
+ {
82
+ "epoch": 0.4326450344149459,
83
+ "grad_norm": 0.10012397915124893,
84
+ "learning_rate": 0.0002784,
85
+ "loss": 0.9811,
86
+ "step": 55
87
+ },
88
+ {
89
+ "epoch": 0.471976401179941,
90
+ "grad_norm": 0.08633492887020111,
91
+ "learning_rate": 0.000276,
92
+ "loss": 0.9556,
93
+ "step": 60
94
+ },
95
+ {
96
+ "epoch": 0.511307767944936,
97
+ "grad_norm": 0.09879346191883087,
98
+ "learning_rate": 0.0002736,
99
+ "loss": 0.9446,
100
+ "step": 65
101
+ },
102
+ {
103
+ "epoch": 0.5506391347099312,
104
+ "grad_norm": 0.08795857429504395,
105
+ "learning_rate": 0.0002712,
106
+ "loss": 0.9228,
107
+ "step": 70
108
+ },
109
+ {
110
+ "epoch": 0.5899705014749262,
111
+ "grad_norm": 0.0837111845612526,
112
+ "learning_rate": 0.0002688,
113
+ "loss": 0.9279,
114
+ "step": 75
115
+ },
116
+ {
117
+ "epoch": 0.6293018682399213,
118
+ "grad_norm": 0.08551318198442459,
119
+ "learning_rate": 0.00026639999999999997,
120
+ "loss": 0.9267,
121
+ "step": 80
122
+ },
123
+ {
124
+ "epoch": 0.6686332350049164,
125
+ "grad_norm": 0.08481767773628235,
126
+ "learning_rate": 0.00026399999999999997,
127
+ "loss": 0.9082,
128
+ "step": 85
129
+ },
130
+ {
131
+ "epoch": 0.7079646017699115,
132
+ "grad_norm": 0.100365050137043,
133
+ "learning_rate": 0.00026159999999999996,
134
+ "loss": 0.9028,
135
+ "step": 90
136
+ },
137
+ {
138
+ "epoch": 0.7472959685349065,
139
+ "grad_norm": 0.08463772386312485,
140
+ "learning_rate": 0.00025919999999999996,
141
+ "loss": 0.8866,
142
+ "step": 95
143
+ },
144
+ {
145
+ "epoch": 0.7866273352999017,
146
+ "grad_norm": 0.09628409892320633,
147
+ "learning_rate": 0.00025679999999999995,
148
+ "loss": 0.8787,
149
+ "step": 100
150
+ },
151
+ {
152
+ "epoch": 0.7866273352999017,
153
+ "eval_loss": 0.8853636980056763,
154
+ "eval_runtime": 24.3719,
155
+ "eval_samples_per_second": 6.729,
156
+ "eval_steps_per_second": 0.862,
157
+ "step": 100
158
+ },
159
+ {
160
+ "epoch": 0.8259587020648967,
161
+ "grad_norm": 0.08835043758153915,
162
+ "learning_rate": 0.00025439999999999995,
163
+ "loss": 0.8786,
164
+ "step": 105
165
+ },
166
+ {
167
+ "epoch": 0.8652900688298918,
168
+ "grad_norm": 0.09190791845321655,
169
+ "learning_rate": 0.00025199999999999995,
170
+ "loss": 0.8693,
171
+ "step": 110
172
+ },
173
+ {
174
+ "epoch": 0.904621435594887,
175
+ "grad_norm": 0.08965795487165451,
176
+ "learning_rate": 0.00024959999999999994,
177
+ "loss": 0.8772,
178
+ "step": 115
179
+ },
180
+ {
181
+ "epoch": 0.943952802359882,
182
+ "grad_norm": 0.09055910259485245,
183
+ "learning_rate": 0.0002472,
184
+ "loss": 0.867,
185
+ "step": 120
186
+ },
187
+ {
188
+ "epoch": 0.983284169124877,
189
+ "grad_norm": 0.09172637015581131,
190
+ "learning_rate": 0.0002448,
191
+ "loss": 0.8536,
192
+ "step": 125
193
+ },
194
+ {
195
+ "epoch": 1.022615535889872,
196
+ "grad_norm": 0.10374542325735092,
197
+ "learning_rate": 0.00024239999999999998,
198
+ "loss": 0.9888,
199
+ "step": 130
200
+ },
201
+ {
202
+ "epoch": 1.0619469026548674,
203
+ "grad_norm": 0.08842068910598755,
204
+ "learning_rate": 0.00023999999999999998,
205
+ "loss": 0.8443,
206
+ "step": 135
207
+ },
208
+ {
209
+ "epoch": 1.1012782694198624,
210
+ "grad_norm": 0.0736837387084961,
211
+ "learning_rate": 0.0002376,
212
+ "loss": 0.8457,
213
+ "step": 140
214
+ },
215
+ {
216
+ "epoch": 1.1406096361848574,
217
+ "grad_norm": 0.07575016468763351,
218
+ "learning_rate": 0.0002352,
219
+ "loss": 0.8335,
220
+ "step": 145
221
+ },
222
+ {
223
+ "epoch": 1.1799410029498525,
224
+ "grad_norm": 0.07092955708503723,
225
+ "learning_rate": 0.0002328,
226
+ "loss": 0.8246,
227
+ "step": 150
228
+ },
229
+ {
230
+ "epoch": 1.2192723697148475,
231
+ "grad_norm": 0.077423095703125,
232
+ "learning_rate": 0.0002304,
233
+ "loss": 0.823,
234
+ "step": 155
235
+ },
236
+ {
237
+ "epoch": 1.2586037364798428,
238
+ "grad_norm": 0.07389391213655472,
239
+ "learning_rate": 0.00022799999999999999,
240
+ "loss": 0.819,
241
+ "step": 160
242
+ },
243
+ {
244
+ "epoch": 1.2979351032448379,
245
+ "grad_norm": 0.08229434490203857,
246
+ "learning_rate": 0.00022559999999999998,
247
+ "loss": 0.8181,
248
+ "step": 165
249
+ },
250
+ {
251
+ "epoch": 1.337266470009833,
252
+ "grad_norm": 0.07665972411632538,
253
+ "learning_rate": 0.00022319999999999998,
254
+ "loss": 0.8118,
255
+ "step": 170
256
+ },
257
+ {
258
+ "epoch": 1.376597836774828,
259
+ "grad_norm": 0.09001573175191879,
260
+ "learning_rate": 0.00022079999999999997,
261
+ "loss": 0.8157,
262
+ "step": 175
263
+ },
264
+ {
265
+ "epoch": 1.415929203539823,
266
+ "grad_norm": 0.07965826243162155,
267
+ "learning_rate": 0.00021839999999999997,
268
+ "loss": 0.8111,
269
+ "step": 180
270
+ },
271
+ {
272
+ "epoch": 1.455260570304818,
273
+ "grad_norm": 0.08642959594726562,
274
+ "learning_rate": 0.00021599999999999996,
275
+ "loss": 0.8003,
276
+ "step": 185
277
+ },
278
+ {
279
+ "epoch": 1.494591937069813,
280
+ "grad_norm": 0.0749087929725647,
281
+ "learning_rate": 0.00021359999999999996,
282
+ "loss": 0.7975,
283
+ "step": 190
284
+ },
285
+ {
286
+ "epoch": 1.5339233038348081,
287
+ "grad_norm": 0.08575734496116638,
288
+ "learning_rate": 0.00021119999999999996,
289
+ "loss": 0.7888,
290
+ "step": 195
291
+ },
292
+ {
293
+ "epoch": 1.5732546705998034,
294
+ "grad_norm": 0.0887129157781601,
295
+ "learning_rate": 0.00020879999999999998,
296
+ "loss": 0.7857,
297
+ "step": 200
298
+ },
299
+ {
300
+ "epoch": 1.5732546705998034,
301
+ "eval_loss": 0.8026237487792969,
302
+ "eval_runtime": 24.2397,
303
+ "eval_samples_per_second": 6.766,
304
+ "eval_steps_per_second": 0.866,
305
+ "step": 200
306
+ },
307
+ {
308
+ "epoch": 1.6125860373647984,
309
+ "grad_norm": 0.0926935002207756,
310
+ "learning_rate": 0.00020639999999999998,
311
+ "loss": 0.7877,
312
+ "step": 205
313
+ },
314
+ {
315
+ "epoch": 1.6519174041297935,
316
+ "grad_norm": 0.08537031710147858,
317
+ "learning_rate": 0.000204,
318
+ "loss": 0.7767,
319
+ "step": 210
320
+ },
321
+ {
322
+ "epoch": 1.6912487708947888,
323
+ "grad_norm": 0.0766814798116684,
324
+ "learning_rate": 0.0002016,
325
+ "loss": 0.785,
326
+ "step": 215
327
+ },
328
+ {
329
+ "epoch": 1.7305801376597838,
330
+ "grad_norm": 0.08394207805395126,
331
+ "learning_rate": 0.0001992,
332
+ "loss": 0.7832,
333
+ "step": 220
334
+ },
335
+ {
336
+ "epoch": 1.7699115044247788,
337
+ "grad_norm": 0.0813060775399208,
338
+ "learning_rate": 0.00019679999999999999,
339
+ "loss": 0.7766,
340
+ "step": 225
341
+ },
342
+ {
343
+ "epoch": 1.809242871189774,
344
+ "grad_norm": 0.08242856711149216,
345
+ "learning_rate": 0.00019439999999999998,
346
+ "loss": 0.7775,
347
+ "step": 230
348
+ },
349
+ {
350
+ "epoch": 1.848574237954769,
351
+ "grad_norm": 0.07610878348350525,
352
+ "learning_rate": 0.00019199999999999998,
353
+ "loss": 0.7736,
354
+ "step": 235
355
+ },
356
+ {
357
+ "epoch": 1.887905604719764,
358
+ "grad_norm": 0.08326178044080734,
359
+ "learning_rate": 0.00018959999999999997,
360
+ "loss": 0.7753,
361
+ "step": 240
362
+ },
363
+ {
364
+ "epoch": 1.927236971484759,
365
+ "grad_norm": 0.09425383061170578,
366
+ "learning_rate": 0.0001872,
367
+ "loss": 0.7577,
368
+ "step": 245
369
+ },
370
+ {
371
+ "epoch": 1.966568338249754,
372
+ "grad_norm": 0.08694498240947723,
373
+ "learning_rate": 0.0001848,
374
+ "loss": 0.7606,
375
+ "step": 250
376
+ },
377
+ {
378
+ "epoch": 2.005899705014749,
379
+ "grad_norm": 0.22805309295654297,
380
+ "learning_rate": 0.0001824,
381
+ "loss": 0.8871,
382
+ "step": 255
383
+ },
384
+ {
385
+ "epoch": 2.045231071779744,
386
+ "grad_norm": 0.09610473364591599,
387
+ "learning_rate": 0.00017999999999999998,
388
+ "loss": 0.7315,
389
+ "step": 260
390
+ },
391
+ {
392
+ "epoch": 2.084562438544739,
393
+ "grad_norm": 0.09666857868432999,
394
+ "learning_rate": 0.00017759999999999998,
395
+ "loss": 0.7315,
396
+ "step": 265
397
+ },
398
+ {
399
+ "epoch": 2.1238938053097347,
400
+ "grad_norm": 0.09328849613666534,
401
+ "learning_rate": 0.00017519999999999998,
402
+ "loss": 0.7344,
403
+ "step": 270
404
+ },
405
+ {
406
+ "epoch": 2.1632251720747298,
407
+ "grad_norm": 0.08137473464012146,
408
+ "learning_rate": 0.00017279999999999997,
409
+ "loss": 0.7347,
410
+ "step": 275
411
+ },
412
+ {
413
+ "epoch": 2.202556538839725,
414
+ "grad_norm": 0.08166103810071945,
415
+ "learning_rate": 0.00017039999999999997,
416
+ "loss": 0.7281,
417
+ "step": 280
418
+ },
419
+ {
420
+ "epoch": 2.24188790560472,
421
+ "grad_norm": 0.08074019104242325,
422
+ "learning_rate": 0.000168,
423
+ "loss": 0.7345,
424
+ "step": 285
425
+ },
426
+ {
427
+ "epoch": 2.281219272369715,
428
+ "grad_norm": 0.08479057997465134,
429
+ "learning_rate": 0.0001656,
430
+ "loss": 0.726,
431
+ "step": 290
432
+ },
433
+ {
434
+ "epoch": 2.32055063913471,
435
+ "grad_norm": 0.08091601729393005,
436
+ "learning_rate": 0.0001632,
437
+ "loss": 0.7184,
438
+ "step": 295
439
+ },
440
+ {
441
+ "epoch": 2.359882005899705,
442
+ "grad_norm": 0.08470489084720612,
443
+ "learning_rate": 0.0001608,
444
+ "loss": 0.7233,
445
+ "step": 300
446
+ },
447
+ {
448
+ "epoch": 2.359882005899705,
449
+ "eval_loss": 0.7612683176994324,
450
+ "eval_runtime": 24.27,
451
+ "eval_samples_per_second": 6.757,
452
+ "eval_steps_per_second": 0.865,
453
+ "step": 300
454
+ },
455
+ {
456
+ "epoch": 2.3992133726647,
457
+ "grad_norm": 0.08677177131175995,
458
+ "learning_rate": 0.0001584,
459
+ "loss": 0.721,
460
+ "step": 305
461
+ },
462
+ {
463
+ "epoch": 2.438544739429695,
464
+ "grad_norm": 0.08474377542734146,
465
+ "learning_rate": 0.000156,
466
+ "loss": 0.7141,
467
+ "step": 310
468
+ },
469
+ {
470
+ "epoch": 2.47787610619469,
471
+ "grad_norm": 0.08565227687358856,
472
+ "learning_rate": 0.0001536,
473
+ "loss": 0.7173,
474
+ "step": 315
475
+ },
476
+ {
477
+ "epoch": 2.5172074729596856,
478
+ "grad_norm": 0.08714301139116287,
479
+ "learning_rate": 0.0001512,
480
+ "loss": 0.7274,
481
+ "step": 320
482
+ },
483
+ {
484
+ "epoch": 2.5565388397246807,
485
+ "grad_norm": 0.0934271439909935,
486
+ "learning_rate": 0.00014879999999999998,
487
+ "loss": 0.7263,
488
+ "step": 325
489
+ },
490
+ {
491
+ "epoch": 2.5958702064896757,
492
+ "grad_norm": 0.08581375330686569,
493
+ "learning_rate": 0.00014639999999999998,
494
+ "loss": 0.7248,
495
+ "step": 330
496
+ },
497
+ {
498
+ "epoch": 2.6352015732546707,
499
+ "grad_norm": 0.08378680050373077,
500
+ "learning_rate": 0.00014399999999999998,
501
+ "loss": 0.721,
502
+ "step": 335
503
+ },
504
+ {
505
+ "epoch": 2.674532940019666,
506
+ "grad_norm": 0.08449660986661911,
507
+ "learning_rate": 0.00014159999999999997,
508
+ "loss": 0.7156,
509
+ "step": 340
510
+ },
511
+ {
512
+ "epoch": 2.713864306784661,
513
+ "grad_norm": 0.08646751940250397,
514
+ "learning_rate": 0.0001392,
515
+ "loss": 0.7094,
516
+ "step": 345
517
+ },
518
+ {
519
+ "epoch": 2.753195673549656,
520
+ "grad_norm": 0.08911272883415222,
521
+ "learning_rate": 0.0001368,
522
+ "loss": 0.709,
523
+ "step": 350
524
+ },
525
+ {
526
+ "epoch": 2.792527040314651,
527
+ "grad_norm": 0.0970829427242279,
528
+ "learning_rate": 0.0001344,
529
+ "loss": 0.7107,
530
+ "step": 355
531
+ },
532
+ {
533
+ "epoch": 2.831858407079646,
534
+ "grad_norm": 0.0854572132229805,
535
+ "learning_rate": 0.00013199999999999998,
536
+ "loss": 0.7148,
537
+ "step": 360
538
+ },
539
+ {
540
+ "epoch": 2.871189773844641,
541
+ "grad_norm": 0.08210612088441849,
542
+ "learning_rate": 0.00012959999999999998,
543
+ "loss": 0.7132,
544
+ "step": 365
545
+ },
546
+ {
547
+ "epoch": 2.910521140609636,
548
+ "grad_norm": 0.0925467386841774,
549
+ "learning_rate": 0.00012719999999999997,
550
+ "loss": 0.7201,
551
+ "step": 370
552
+ },
553
+ {
554
+ "epoch": 2.949852507374631,
555
+ "grad_norm": 0.09149914979934692,
556
+ "learning_rate": 0.00012479999999999997,
557
+ "loss": 0.7086,
558
+ "step": 375
559
+ },
560
+ {
561
+ "epoch": 2.989183874139626,
562
+ "grad_norm": 0.0827464610338211,
563
+ "learning_rate": 0.0001224,
564
+ "loss": 0.7102,
565
+ "step": 380
566
+ },
567
+ {
568
+ "epoch": 3.0285152409046217,
569
+ "grad_norm": 0.09861475974321365,
570
+ "learning_rate": 0.00011999999999999999,
571
+ "loss": 0.8086,
572
+ "step": 385
573
+ },
574
+ {
575
+ "epoch": 3.0678466076696167,
576
+ "grad_norm": 0.09810496121644974,
577
+ "learning_rate": 0.0001176,
578
+ "loss": 0.6784,
579
+ "step": 390
580
+ },
581
+ {
582
+ "epoch": 3.1071779744346117,
583
+ "grad_norm": 0.08657824248075485,
584
+ "learning_rate": 0.0001152,
585
+ "loss": 0.6818,
586
+ "step": 395
587
+ },
588
+ {
589
+ "epoch": 3.146509341199607,
590
+ "grad_norm": 0.08861815184354782,
591
+ "learning_rate": 0.00011279999999999999,
592
+ "loss": 0.6755,
593
+ "step": 400
594
+ },
595
+ {
596
+ "epoch": 3.146509341199607,
597
+ "eval_loss": 0.7408613562583923,
598
+ "eval_runtime": 24.2895,
599
+ "eval_samples_per_second": 6.752,
600
+ "eval_steps_per_second": 0.865,
601
+ "step": 400
602
+ }
603
+ ],
604
+ "logging_steps": 5,
605
+ "max_steps": 635,
606
+ "num_input_tokens_seen": 0,
607
+ "num_train_epochs": 5,
608
+ "save_steps": 100,
609
+ "stateful_callbacks": {
610
+ "TrainerControl": {
611
+ "args": {
612
+ "should_epoch_stop": false,
613
+ "should_evaluate": false,
614
+ "should_log": false,
615
+ "should_save": true,
616
+ "should_training_stop": false
617
+ },
618
+ "attributes": {}
619
+ }
620
+ },
621
+ "total_flos": 2.0292300729210634e+19,
622
+ "train_batch_size": 4,
623
+ "trial_name": null,
624
+ "trial_params": null
625
+ }
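
`trainer_state.json` logs the training loss every 5 steps and runs an evaluation every 100 steps; at the 400 steps captured here (epoch ~3.15 of 5, 635 max steps, per-device batch size 4) the evaluation loss has fallen from about 0.885 to 0.741. A small sketch for pulling that curve out of the file follows; the local path is an assumption.

```python
# Sketch: extract the logged eval-loss curve from trainer_state.json.
import json

with open("trainer_state.json") as f:   # adjust the path to wherever the checkpoint lives
    state = json.load(f)

evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
for step, loss in evals:
    print(f"step {step:>3}: eval_loss {loss:.4f}")
# Logged values: 0.8854 (step 100), 0.8026 (200), 0.7613 (300), 0.7409 (400).
```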
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:89677001dcae53eb34c62bd4fd359147dc9fa1f8863bb47ac3eec0335fe11602
+ size 5496
vocab.json ADDED
The diff for this file is too large to render. See raw diff