Safetensors
qwen2
kalomaze committed · verified
Commit 605a3c3 · 1 Parent(s): 4bab0ed

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "</tool_call>": 151658,
+   "<tool_call>": 151657,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
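
A minimal sketch (not part of the commit): confirm that the tokenizer shipped in this folder resolves the special-token strings to the IDs listed in added_tokens.json. Paths assume the commit has been checked out into the current directory.

# Sketch: spot-check a few IDs from added_tokens.json against the loaded tokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")  # reads tokenizer.json, added_tokens.json, etc.
assert tok.convert_tokens_to_ids("<|endoftext|>") == 151643
assert tok.convert_tokens_to_ids("<|im_start|>") == 151644
assert tok.convert_tokens_to_ids("</tool_call>") == 151658
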
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_name_or_path": "Quest-AI/quest-corruption-14b-s110-r3",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "eos_token_id": 151643,
+   "hidden_act": "silu",
+   "hidden_size": 5120,
+   "initializer_range": 0.02,
+   "intermediate_size": 13824,
+   "max_position_embeddings": 131072,
+   "max_window_layers": 48,
+   "model_type": "qwen2",
+   "num_attention_heads": 40,
+   "num_hidden_layers": 48,
+   "num_key_value_heads": 8,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 1000000.0,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.49.0",
+   "use_cache": false,
+   "use_sliding_window": false,
+   "vocab_size": 152064
+ }
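
A quick sanity check, as a sketch rather than anything from the commit itself: a few sizes implied by config.json. 40 query heads over 8 key/value heads means grouped-query attention with 5 queries per KV head, and 5120 / 40 gives a head dimension of 128.

# Sketch: derive shapes implied by the config above (paths assumed local).
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained(".")
head_dim = cfg.hidden_size // cfg.num_attention_heads           # 5120 // 40 = 128
kv_groups = cfg.num_attention_heads // cfg.num_key_value_heads  # 40 // 8 = 5 (GQA)
print(head_dim, kv_groups, cfg.vocab_size)                      # 128 5 152064
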
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": 151643,
+   "max_new_tokens": 2048,
+   "transformers_version": "4.49.0"
+ }
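
These fields become the defaults that model.generate() picks up automatically, so sampling is on and generation stops at token 151643 or after 2048 new tokens. A sketch of where that takes effect, assuming the checkpoint is available locally:

# Sketch: generation_config.json supplies the generate() defaults.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")
model = AutoModelForCausalLM.from_pretrained(".", torch_dtype=torch.bfloat16)
ids = tok("Hello", return_tensors="pt").input_ids
out = model.generate(ids)  # do_sample=True, max_new_tokens=2048 come from generation_config.json
print(tok.decode(out[0], skip_special_tokens=True))
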
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c074ed937d71a26c39efeb0ad1a1ab5e6b27c256127f640ca56e994ae00877c
+ size 4986211280
model-00002-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b7ea86e4605994cfead38009a634da522c1a01d8b323e0521ed98a2e4c1605b
+ size 4954847344
model-00003-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c919064f4c6f83c40fe74f95736857d84d99d3d0761588cc0794fcc2ff63bd39
+ size 4954847392
model-00004-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab7b65e7dcdfed22a1636996695ce0c79b06850498aaf56757a9d0143befceb5
+ size 4954847392
model-00005-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab238db5d2375959aaf95bf36c9bf375dce785cfee02315a4efeb39609212853
+ size 4954847392
model-00006-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:30b3ead267d1237e108e69dfc8b1803a5cce22c3ee4c9b8905fb80a3bd46ebcc
+ size 4734533160
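
Each of the six entries above is a Git LFS pointer rather than the weights themselves: the real payload is addressed by its SHA-256 oid and byte size. A sketch of verifying a downloaded shard against its pointer (file name and oid taken from this commit):

# Sketch: hash a shard and compare it to the oid in the LFS pointer.
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

expected = "8c074ed937d71a26c39efeb0ad1a1ab5e6b27c256127f640ca56e994ae00877c"
assert sha256_of("model-00001-of-00006.safetensors") == expected
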
model.safetensors.index.json ADDED
@@ -0,0 +1,586 @@
+ {
+   "metadata": {
+     "total_size": 29540067328
+   },
+   "weight_map": {
+     "lm_head.weight": "model-00006-of-00006.safetensors",
+     "model.embed_tokens.weight": "model-00001-of-00006.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00006.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+     "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00006.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00006.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00006.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00006.safetensors",
+     "model.layers.1.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.1.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+     "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00006.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00006.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00006.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00002-of-00006.safetensors",
+     "model.layers.10.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.10.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+     "model.layers.10.self_attn.k_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.10.self_attn.q_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.10.self_attn.v_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00002-of-00006.safetensors",
+     "model.layers.11.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.11.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+     "model.layers.11.self_attn.k_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.11.self_attn.q_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.11.self_attn.v_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00002-of-00006.safetensors",
+     "model.layers.12.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.12.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+     "model.layers.12.self_attn.k_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.12.self_attn.q_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.12.self_attn.v_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00002-of-00006.safetensors",
+     "model.layers.13.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.13.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+     "model.layers.13.self_attn.k_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.13.self_attn.q_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.13.self_attn.v_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00002-of-00006.safetensors",
+     "model.layers.14.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.14.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+     "model.layers.14.self_attn.k_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.14.self_attn.q_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.14.self_attn.v_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00003-of-00006.safetensors",
+     "model.layers.15.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.15.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.15.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+     "model.layers.15.self_attn.k_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.15.self_attn.q_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.15.self_attn.v_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00003-of-00006.safetensors",
+     "model.layers.16.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.16.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.16.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+     "model.layers.16.self_attn.k_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.16.self_attn.q_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.16.self_attn.v_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00003-of-00006.safetensors",
+     "model.layers.17.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.17.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.17.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+     "model.layers.17.self_attn.k_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.17.self_attn.q_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.17.self_attn.v_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00003-of-00006.safetensors",
+     "model.layers.18.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.18.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+     "model.layers.18.self_attn.k_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.18.self_attn.q_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.18.self_attn.v_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00003-of-00006.safetensors",
+     "model.layers.19.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.19.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+     "model.layers.19.self_attn.k_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.19.self_attn.q_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.19.self_attn.v_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00006.safetensors",
+     "model.layers.2.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.2.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+     "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00006.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00006.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00006.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.20.input_layernorm.weight": "model-00003-of-00006.safetensors",
+     "model.layers.20.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.20.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+     "model.layers.20.self_attn.k_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.20.self_attn.q_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.20.self_attn.v_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.21.input_layernorm.weight": "model-00003-of-00006.safetensors",
+     "model.layers.21.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.21.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+     "model.layers.21.self_attn.k_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.21.self_attn.q_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.21.self_attn.v_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.22.input_layernorm.weight": "model-00003-of-00006.safetensors",
+     "model.layers.22.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.22.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+     "model.layers.22.self_attn.k_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.22.self_attn.q_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.22.self_attn.v_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.23.input_layernorm.weight": "model-00003-of-00006.safetensors",
+     "model.layers.23.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.23.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+     "model.layers.23.self_attn.k_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.23.self_attn.q_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.23.self_attn.v_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.24.input_layernorm.weight": "model-00004-of-00006.safetensors",
+     "model.layers.24.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.24.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.24.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.24.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+     "model.layers.24.self_attn.k_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.24.self_attn.q_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.24.self_attn.v_proj.bias": "model-00003-of-00006.safetensors",
+     "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+     "model.layers.25.input_layernorm.weight": "model-00004-of-00006.safetensors",
+     "model.layers.25.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.25.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.25.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.25.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+     "model.layers.25.self_attn.k_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.25.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.25.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.25.self_attn.q_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.25.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.25.self_attn.v_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.25.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.26.input_layernorm.weight": "model-00004-of-00006.safetensors",
+     "model.layers.26.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.26.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.26.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.26.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+     "model.layers.26.self_attn.k_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.26.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.26.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.26.self_attn.q_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.26.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.26.self_attn.v_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.26.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.27.input_layernorm.weight": "model-00004-of-00006.safetensors",
+     "model.layers.27.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.27.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.27.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+     "model.layers.27.self_attn.k_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.27.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.27.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.27.self_attn.q_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.27.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.27.self_attn.v_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.27.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.28.input_layernorm.weight": "model-00004-of-00006.safetensors",
+     "model.layers.28.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.28.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.28.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.28.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+     "model.layers.28.self_attn.k_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.28.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.28.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.28.self_attn.q_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.28.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.28.self_attn.v_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.28.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.29.input_layernorm.weight": "model-00004-of-00006.safetensors",
+     "model.layers.29.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.29.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.29.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.29.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+     "model.layers.29.self_attn.k_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.29.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.29.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.29.self_attn.q_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.29.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.29.self_attn.v_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.29.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00001-of-00006.safetensors",
+     "model.layers.3.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.3.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+     "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00006.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00006.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00006.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.30.input_layernorm.weight": "model-00004-of-00006.safetensors",
+     "model.layers.30.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.30.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.30.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.30.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+     "model.layers.30.self_attn.k_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.30.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.30.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.30.self_attn.q_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.30.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.30.self_attn.v_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.30.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.31.input_layernorm.weight": "model-00004-of-00006.safetensors",
+     "model.layers.31.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.31.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.31.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.31.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+     "model.layers.31.self_attn.k_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.31.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.31.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.31.self_attn.q_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.31.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.31.self_attn.v_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.31.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.32.input_layernorm.weight": "model-00004-of-00006.safetensors",
+     "model.layers.32.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.32.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.32.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.32.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+     "model.layers.32.self_attn.k_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.32.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.32.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.32.self_attn.q_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.32.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.32.self_attn.v_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.32.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.33.input_layernorm.weight": "model-00005-of-00006.safetensors",
+     "model.layers.33.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.33.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.33.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.33.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+     "model.layers.33.self_attn.k_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.33.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.33.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.33.self_attn.q_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.33.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.33.self_attn.v_proj.bias": "model-00004-of-00006.safetensors",
+     "model.layers.33.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+     "model.layers.34.input_layernorm.weight": "model-00005-of-00006.safetensors",
+     "model.layers.34.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.34.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.34.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.34.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+     "model.layers.34.self_attn.k_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.34.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.34.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.34.self_attn.q_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.34.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.34.self_attn.v_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.34.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.35.input_layernorm.weight": "model-00005-of-00006.safetensors",
+     "model.layers.35.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.35.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.35.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.35.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+     "model.layers.35.self_attn.k_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.35.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.35.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.35.self_attn.q_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.35.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.35.self_attn.v_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.35.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.36.input_layernorm.weight": "model-00005-of-00006.safetensors",
+     "model.layers.36.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.36.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.36.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.36.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+     "model.layers.36.self_attn.k_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.36.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.36.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.36.self_attn.q_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.36.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.36.self_attn.v_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.36.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.37.input_layernorm.weight": "model-00005-of-00006.safetensors",
+     "model.layers.37.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.37.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.37.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.37.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+     "model.layers.37.self_attn.k_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.37.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.37.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.37.self_attn.q_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.37.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.37.self_attn.v_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.37.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.38.input_layernorm.weight": "model-00005-of-00006.safetensors",
+     "model.layers.38.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.38.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.38.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.38.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+     "model.layers.38.self_attn.k_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.38.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.38.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.38.self_attn.q_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.38.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.38.self_attn.v_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.38.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.39.input_layernorm.weight": "model-00005-of-00006.safetensors",
+     "model.layers.39.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.39.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.39.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.39.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+     "model.layers.39.self_attn.k_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.39.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.39.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.39.self_attn.q_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.39.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.39.self_attn.v_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.39.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00001-of-00006.safetensors",
+     "model.layers.4.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.4.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+     "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00006.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00006.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00006.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.40.input_layernorm.weight": "model-00005-of-00006.safetensors",
+     "model.layers.40.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.40.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.40.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.40.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+     "model.layers.40.self_attn.k_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.40.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.40.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.40.self_attn.q_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.40.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.40.self_attn.v_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.40.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.41.input_layernorm.weight": "model-00005-of-00006.safetensors",
+     "model.layers.41.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.41.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.41.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.41.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+     "model.layers.41.self_attn.k_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.41.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.41.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.41.self_attn.q_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.41.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.41.self_attn.v_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.41.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.42.input_layernorm.weight": "model-00006-of-00006.safetensors",
+     "model.layers.42.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.42.mlp.gate_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.42.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.42.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
+     "model.layers.42.self_attn.k_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.42.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.42.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.42.self_attn.q_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.42.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.42.self_attn.v_proj.bias": "model-00005-of-00006.safetensors",
+     "model.layers.42.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+     "model.layers.43.input_layernorm.weight": "model-00006-of-00006.safetensors",
+     "model.layers.43.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.43.mlp.gate_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.43.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.43.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
+     "model.layers.43.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+     "model.layers.43.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.43.self_attn.o_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.43.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+     "model.layers.43.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.43.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+     "model.layers.43.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.44.input_layernorm.weight": "model-00006-of-00006.safetensors",
+     "model.layers.44.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.44.mlp.gate_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.44.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.44.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
+     "model.layers.44.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+     "model.layers.44.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.44.self_attn.o_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.44.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+     "model.layers.44.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.44.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+     "model.layers.44.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.45.input_layernorm.weight": "model-00006-of-00006.safetensors",
+     "model.layers.45.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.45.mlp.gate_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.45.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.45.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
+     "model.layers.45.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+     "model.layers.45.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.45.self_attn.o_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.45.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+     "model.layers.45.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.45.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+     "model.layers.45.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.46.input_layernorm.weight": "model-00006-of-00006.safetensors",
+     "model.layers.46.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.46.mlp.gate_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.46.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.46.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
+     "model.layers.46.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+     "model.layers.46.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.46.self_attn.o_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.46.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+     "model.layers.46.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.46.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+     "model.layers.46.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.47.input_layernorm.weight": "model-00006-of-00006.safetensors",
+     "model.layers.47.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.47.mlp.gate_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.47.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.47.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
+     "model.layers.47.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+     "model.layers.47.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.47.self_attn.o_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.47.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+     "model.layers.47.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.47.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+     "model.layers.47.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00001-of-00006.safetensors",
+     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.5.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+     "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00006.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00006.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00006.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00002-of-00006.safetensors",
+     "model.layers.6.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.6.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+     "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00006.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00006.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00006.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00002-of-00006.safetensors",
+     "model.layers.7.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.7.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+     "model.layers.7.self_attn.k_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.7.self_attn.q_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.7.self_attn.v_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00002-of-00006.safetensors",
+     "model.layers.8.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.8.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+     "model.layers.8.self_attn.k_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.8.self_attn.q_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.8.self_attn.v_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00002-of-00006.safetensors",
+     "model.layers.9.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.9.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+     "model.layers.9.self_attn.k_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.9.self_attn.q_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+     "model.layers.9.self_attn.v_proj.bias": "model-00002-of-00006.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+     "model.norm.weight": "model-00006-of-00006.safetensors"
+   }
+ }
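
The weight_map is what lets loaders pull a single tensor without touching all six shards. A sketch of resolving one entry by hand, assuming the shards sit in the current directory:

# Sketch: look up a tensor's shard in the index, then read just that tensor.
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)
name = "model.layers.15.self_attn.k_proj.weight"
shard = index["weight_map"][name]  # -> "model-00002-of-00006.safetensors"
with safe_open(shard, framework="pt") as st:
    tensor = st.get_tensor(name)
print(shard, tuple(tensor.shape))
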
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
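
Note that eos_token and pad_token are both <|endoftext|> here, so the loaded tokenizer reports the same ID (151643) for both roles. A sketch:

# Sketch: both special-token roles map to the same token.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")
assert tok.eos_token == tok.pad_token == "<|endoftext|>"
assert tok.pad_token_id == tok.eos_token_id == 151643
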
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5eee858c5123a4279c3e1f7b81247343f356ac767940b2692a928ad929543214
+ size 11422063
tokenizer_config.json ADDED
@@ -0,0 +1,208 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ {
+ "add_bos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151646": {
+ "content": "<|object_ref_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|object_ref_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151648": {
+ "content": "<|box_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151649": {
+ "content": "<|box_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "bos_token": null,
+ "chat_template": "{{ messages[0]['content'] }}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|endoftext|>",
+ "errors": "replace",
+ "extra_special_tokens": {},
+ "model_max_length": 131072,
+ "pad_token": "<|endoftext|>",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
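Note the minimal `chat_template`: `{{ messages[0]['content'] }}` renders only the first message's raw content, discarding roles, later turns, and any `<|im_start|>`/`<|im_end|>` wrapping, even though those tokens are registered above. A sketch of the effect (repo id assumed as before):

```python
from transformers import AutoTokenizer

# Repo id is illustrative; substitute the actual repository name.
tok = AutoTokenizer.from_pretrained("Quest-AI/quest-corruption-14b-s110-r3")

messages = [
    {"role": "user", "content": "Hello there."},
    {"role": "assistant", "content": "This later message is dropped."},
]

# The template renders only messages[0]['content']: no role headers,
# no ChatML wrapping, no generation prompt.
prompt = tok.apply_chat_template(messages, tokenize=False)
assert prompt == "Hello there."
```

In practice this makes `apply_chat_template` behave like raw completion-style prompting rather than ChatML chat formatting.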
trainer_state.json ADDED
@@ -0,0 +1,1138 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.9941520467836257,
+ "eval_steps": 500,
+ "global_step": 85,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "completion_length": 233.2692413330078,
+ "epoch": 0.011695906432748537,
+ "grad_norm": 0.7825680366734882,
+ "kl": 0.00028896331787109375,
+ "learning_rate": 1e-05,
+ "loss": 0.0589,
+ "reward": 0.44190600514411926,
+ "reward_std": 0.1345965526998043,
+ "rewards/length_reward": 0.0725274495780468,
+ "rewards/similarity_reward": 0.3693785071372986,
+ "step": 1
+ },
+ {
+ "completion_length": 225.36538696289062,
+ "epoch": 0.023391812865497075,
+ "grad_norm": 0.7210039566738229,
+ "kl": 0.0002727508544921875,
+ "learning_rate": 1e-05,
+ "loss": 0.0685,
+ "reward": 0.5305129289627075,
+ "reward_std": 0.11423949897289276,
+ "rewards/length_reward": 0.07417580112814903,
+ "rewards/similarity_reward": 0.4563370496034622,
+ "step": 2
+ },
+ {
+ "completion_length": 226.0714340209961,
+ "epoch": 0.03508771929824561,
+ "grad_norm": 0.8017988620602983,
+ "kl": 0.000553131103515625,
+ "learning_rate": 1e-05,
+ "loss": 0.0625,
+ "reward": 0.44079823791980743,
+ "reward_std": 0.16690925508737564,
+ "rewards/length_reward": 0.06483515352010727,
+ "rewards/similarity_reward": 0.3759630620479584,
+ "step": 3
+ },
+ {
+ "completion_length": 224.1456069946289,
+ "epoch": 0.04678362573099415,
+ "grad_norm": 0.6572331069799515,
+ "kl": 0.0007152557373046875,
+ "learning_rate": 1e-05,
+ "loss": 0.0163,
+ "reward": 0.6230878233909607,
+ "reward_std": 0.09758869931101799,
+ "rewards/length_reward": 0.08571425825357437,
+ "rewards/similarity_reward": 0.5373735278844833,
+ "step": 4
+ },
+ {
+ "completion_length": 227.739013671875,
+ "epoch": 0.05847953216374269,
+ "grad_norm": 0.7644057699592899,
+ "kl": 0.001308441162109375,
+ "learning_rate": 1e-05,
+ "loss": 0.0284,
+ "reward": 0.5326894819736481,
+ "reward_std": 0.08687691763043404,
+ "rewards/length_reward": 0.080494474619627,
+ "rewards/similarity_reward": 0.452194944024086,
+ "step": 5
+ },
+ {
+ "completion_length": 260.0604553222656,
+ "epoch": 0.07017543859649122,
+ "grad_norm": 0.6020987541320493,
+ "kl": 0.001560211181640625,
+ "learning_rate": 1e-05,
+ "loss": 0.0216,
+ "reward": 0.5758364498615265,
+ "reward_std": 0.1088729053735733,
+ "rewards/length_reward": 0.07472525537014008,
+ "rewards/similarity_reward": 0.5011111199855804,
+ "step": 6
+ },
+ {
+ "completion_length": 201.97252655029297,
+ "epoch": 0.08187134502923976,
+ "grad_norm": 0.8604822682015656,
+ "kl": 0.003173828125,
+ "learning_rate": 1e-05,
+ "loss": 0.0533,
+ "reward": 0.4720712900161743,
+ "reward_std": 0.09716014564037323,
+ "rewards/length_reward": 0.07939557731151581,
+ "rewards/similarity_reward": 0.3926757276058197,
+ "step": 7
+ },
+ {
+ "completion_length": 222.34890747070312,
+ "epoch": 0.0935672514619883,
+ "grad_norm": 0.694625551351849,
+ "kl": 0.00310516357421875,
+ "learning_rate": 1e-05,
+ "loss": 0.0127,
+ "reward": 0.5358779579401016,
+ "reward_std": 0.10114030539989471,
+ "rewards/length_reward": 0.0755494236946106,
+ "rewards/similarity_reward": 0.46032850444316864,
+ "step": 8
+ },
+ {
+ "completion_length": 217.7005615234375,
+ "epoch": 0.10526315789473684,
+ "grad_norm": 0.7458008727954507,
+ "kl": 0.00432586669921875,
+ "learning_rate": 1e-05,
+ "loss": 0.0374,
+ "reward": 0.5618502795696259,
+ "reward_std": 0.12866197898983955,
+ "rewards/length_reward": 0.07774722576141357,
+ "rewards/similarity_reward": 0.4841030240058899,
+ "step": 9
+ },
+ {
+ "completion_length": 216.4945068359375,
+ "epoch": 0.11695906432748537,
+ "grad_norm": 0.7777163297040798,
+ "kl": 0.0069580078125,
+ "learning_rate": 1e-05,
+ "loss": 0.031,
+ "reward": 0.5312780439853668,
+ "reward_std": 0.10122782737016678,
+ "rewards/length_reward": 0.07829667627811432,
+ "rewards/similarity_reward": 0.4529813081026077,
+ "step": 10
+ },
+ {
+ "completion_length": 231.3049545288086,
+ "epoch": 0.1286549707602339,
+ "grad_norm": 0.7043226037353059,
+ "kl": 0.00714111328125,
+ "learning_rate": 1e-05,
+ "loss": 0.0418,
+ "reward": 0.5691985487937927,
+ "reward_std": 0.08419586345553398,
+ "rewards/length_reward": 0.08131865039467812,
+ "rewards/similarity_reward": 0.4878798723220825,
+ "step": 11
+ },
+ {
+ "completion_length": 196.5769271850586,
+ "epoch": 0.14035087719298245,
+ "grad_norm": 0.7941199097385595,
+ "kl": 0.01031494140625,
+ "learning_rate": 1e-05,
+ "loss": 0.0164,
+ "reward": 0.5117136538028717,
+ "reward_std": 0.09071975573897362,
+ "rewards/length_reward": 0.0793955810368061,
+ "rewards/similarity_reward": 0.4323180317878723,
+ "step": 12
+ },
+ {
+ "completion_length": 204.71428680419922,
+ "epoch": 0.15204678362573099,
+ "grad_norm": 0.8123521495367484,
+ "kl": 0.01031494140625,
+ "learning_rate": 1e-05,
+ "loss": 0.0074,
+ "reward": 0.542015552520752,
+ "reward_std": 0.0827392153441906,
+ "rewards/length_reward": 0.08159337937831879,
+ "rewards/similarity_reward": 0.46042218804359436,
+ "step": 13
+ },
+ {
+ "completion_length": 207.0467071533203,
+ "epoch": 0.16374269005847952,
+ "grad_norm": 0.6897883040352237,
+ "kl": 0.01214599609375,
+ "learning_rate": 1e-05,
+ "loss": 0.0216,
+ "reward": 0.5222532451152802,
+ "reward_std": 0.0871199369430542,
+ "rewards/length_reward": 0.0725274533033371,
+ "rewards/similarity_reward": 0.44972579181194305,
+ "step": 14
+ },
+ {
+ "completion_length": 221.29671478271484,
+ "epoch": 0.17543859649122806,
+ "grad_norm": 0.7275135176279177,
+ "kl": 0.0146484375,
+ "learning_rate": 1e-05,
+ "loss": 0.0256,
+ "reward": 0.5731278657913208,
+ "reward_std": 0.07662934809923172,
+ "rewards/length_reward": 0.08159337565302849,
+ "rewards/similarity_reward": 0.4915344566106796,
+ "step": 15
+ },
+ {
+ "completion_length": 215.31593322753906,
+ "epoch": 0.1871345029239766,
+ "grad_norm": 0.7812549246502659,
+ "kl": 0.01953125,
+ "learning_rate": 1e-05,
+ "loss": 0.0154,
+ "reward": 0.5117989480495453,
+ "reward_std": 0.08758236095309258,
+ "rewards/length_reward": 0.07802195474505424,
+ "rewards/similarity_reward": 0.43377695977687836,
+ "step": 16
+ },
+ {
+ "completion_length": 220.59341430664062,
+ "epoch": 0.19883040935672514,
+ "grad_norm": 0.7657085041519065,
+ "kl": 0.02239990234375,
+ "learning_rate": 1e-05,
+ "loss": 0.0072,
+ "reward": 0.482890322804451,
+ "reward_std": 0.0868370421230793,
+ "rewards/length_reward": 0.07527470216155052,
+ "rewards/similarity_reward": 0.4076155871152878,
+ "step": 17
+ },
+ {
+ "completion_length": 206.3983612060547,
+ "epoch": 0.21052631578947367,
+ "grad_norm": 0.7396272289433179,
+ "kl": 0.02105712890625,
+ "learning_rate": 1e-05,
+ "loss": 0.01,
+ "reward": 0.5376324951648712,
+ "reward_std": 0.06652756221592426,
+ "rewards/length_reward": 0.07390107586979866,
+ "rewards/similarity_reward": 0.4637313634157181,
+ "step": 18
+ },
+ {
+ "completion_length": 224.80770111083984,
+ "epoch": 0.2222222222222222,
+ "grad_norm": 0.7091479350725627,
+ "kl": 0.0283203125,
+ "learning_rate": 1e-05,
+ "loss": -0.0037,
+ "reward": 0.6033537983894348,
+ "reward_std": 0.09801716729998589,
+ "rewards/length_reward": 0.09120875224471092,
+ "rewards/similarity_reward": 0.5121450126171112,
+ "step": 19
+ },
+ {
+ "completion_length": 197.4148406982422,
+ "epoch": 0.23391812865497075,
+ "grad_norm": 0.7489115859239202,
+ "kl": 0.02423095703125,
+ "learning_rate": 1e-05,
+ "loss": 0.008,
+ "reward": 0.5394436717033386,
+ "reward_std": 0.10236060246825218,
+ "rewards/length_reward": 0.0824175514280796,
+ "rewards/similarity_reward": 0.45702606439590454,
+ "step": 20
+ },
+ {
+ "completion_length": 203.4945068359375,
+ "epoch": 0.24561403508771928,
+ "grad_norm": 0.8123030841009119,
+ "kl": 0.0247802734375,
+ "learning_rate": 1e-05,
+ "loss": 0.0375,
+ "reward": 0.5430706441402435,
+ "reward_std": 0.10549085959792137,
+ "rewards/length_reward": 0.07390107586979866,
+ "rewards/similarity_reward": 0.4691695272922516,
+ "step": 21
+ },
+ {
+ "completion_length": 214.34615325927734,
+ "epoch": 0.2573099415204678,
+ "grad_norm": 0.7650885748136014,
+ "kl": 0.02874755859375,
+ "learning_rate": 1e-05,
+ "loss": -0.0077,
+ "reward": 0.5628645718097687,
+ "reward_std": 0.10375452041625977,
+ "rewards/length_reward": 0.08489007875323296,
+ "rewards/similarity_reward": 0.47797445952892303,
+ "step": 22
+ },
+ {
+ "completion_length": 216.52472686767578,
+ "epoch": 0.26900584795321636,
+ "grad_norm": 0.8060675248733321,
+ "kl": 0.0272216796875,
+ "learning_rate": 1e-05,
+ "loss": 0.0077,
+ "reward": 0.5317259877920151,
+ "reward_std": 0.11137965694069862,
+ "rewards/length_reward": 0.07939557731151581,
+ "rewards/similarity_reward": 0.4523303806781769,
+ "step": 23
+ },
+ {
+ "completion_length": 193.7335205078125,
+ "epoch": 0.2807017543859649,
+ "grad_norm": 0.8056017468735167,
+ "kl": 0.02947998046875,
+ "learning_rate": 1e-05,
+ "loss": 0.0115,
+ "reward": 0.5501070320606232,
+ "reward_std": 0.07095552235841751,
+ "rewards/length_reward": 0.08406590297818184,
+ "rewards/similarity_reward": 0.46604107320308685,
+ "step": 24
+ },
+ {
+ "completion_length": 208.95879364013672,
+ "epoch": 0.29239766081871343,
+ "grad_norm": 0.7293689736995611,
+ "kl": 0.02752685546875,
+ "learning_rate": 1e-05,
+ "loss": 0.0195,
+ "reward": 0.5526517927646637,
+ "reward_std": 0.07119211554527283,
+ "rewards/length_reward": 0.07939557731151581,
+ "rewards/similarity_reward": 0.4732562005519867,
+ "step": 25
+ },
+ {
+ "completion_length": 211.06319427490234,
+ "epoch": 0.30409356725146197,
+ "grad_norm": 0.7687520625215489,
+ "kl": 0.0325927734375,
+ "learning_rate": 1e-05,
+ "loss": -0.0022,
+ "reward": 0.5052716434001923,
+ "reward_std": 0.07904357835650444,
+ "rewards/length_reward": 0.07582415267825127,
+ "rewards/similarity_reward": 0.4294474273920059,
+ "step": 26
+ },
+ {
+ "completion_length": 201.81868743896484,
+ "epoch": 0.3157894736842105,
+ "grad_norm": 0.7888998841444814,
+ "kl": 0.0352783203125,
+ "learning_rate": 1e-05,
+ "loss": 0.0177,
+ "reward": 0.4917192906141281,
+ "reward_std": 0.08234058320522308,
+ "rewards/length_reward": 0.08598897978663445,
+ "rewards/similarity_reward": 0.40573032200336456,
+ "step": 27
+ },
+ {
+ "completion_length": 204.54671478271484,
+ "epoch": 0.32748538011695905,
+ "grad_norm": 0.8013071778096233,
+ "kl": 0.031005859375,
+ "learning_rate": 1e-05,
+ "loss": 0.016,
+ "reward": 0.56119704246521,
+ "reward_std": 0.08157838508486748,
+ "rewards/length_reward": 0.07390107586979866,
+ "rewards/similarity_reward": 0.487295925617218,
+ "step": 28
+ },
+ {
+ "completion_length": 208.75275421142578,
+ "epoch": 0.3391812865497076,
+ "grad_norm": 0.7637875016013174,
+ "kl": 0.03863525390625,
+ "learning_rate": 1e-05,
+ "loss": -0.0051,
+ "reward": 0.5819019079208374,
+ "reward_std": 0.07344460859894753,
+ "rewards/length_reward": 0.08379117771983147,
+ "rewards/similarity_reward": 0.49811069667339325,
+ "step": 29
+ },
+ {
+ "completion_length": 218.49451446533203,
+ "epoch": 0.3508771929824561,
+ "grad_norm": 0.6802851696241219,
+ "kl": 0.02716064453125,
+ "learning_rate": 1e-05,
+ "loss": 0.0213,
+ "reward": 0.616778552532196,
+ "reward_std": 0.07313168421387672,
+ "rewards/length_reward": 0.07912085205316544,
+ "rewards/similarity_reward": 0.5376576334238052,
+ "step": 30
+ },
+ {
+ "completion_length": 221.78846740722656,
+ "epoch": 0.36257309941520466,
+ "grad_norm": 0.7062376746120537,
+ "kl": 0.03472900390625,
+ "learning_rate": 1e-05,
+ "loss": 0.0144,
+ "reward": 0.607868492603302,
+ "reward_std": 0.07485750317573547,
+ "rewards/length_reward": 0.08708787709474564,
+ "rewards/similarity_reward": 0.5207805633544922,
+ "step": 31
+ },
+ {
+ "completion_length": 210.76923370361328,
+ "epoch": 0.3742690058479532,
+ "grad_norm": 0.7764830119349555,
+ "kl": 0.038818359375,
+ "learning_rate": 1e-05,
+ "loss": 0.0001,
+ "reward": 0.504259467124939,
+ "reward_std": 0.08111266419291496,
+ "rewards/length_reward": 0.08186810463666916,
+ "rewards/similarity_reward": 0.422391340136528,
+ "step": 32
+ },
+ {
+ "completion_length": 213.78297424316406,
+ "epoch": 0.38596491228070173,
+ "grad_norm": 0.6888593105229915,
+ "kl": 0.0301513671875,
+ "learning_rate": 1e-05,
+ "loss": -0.0007,
+ "reward": 0.6102275550365448,
+ "reward_std": 0.06388814933598042,
+ "rewards/length_reward": 0.08901095390319824,
+ "rewards/similarity_reward": 0.5212165713310242,
+ "step": 33
+ },
+ {
+ "completion_length": 227.48902130126953,
+ "epoch": 0.39766081871345027,
+ "grad_norm": 0.7163494050752441,
+ "kl": 0.0367431640625,
+ "learning_rate": 1e-05,
+ "loss": 0.0095,
+ "reward": 0.5973767936229706,
+ "reward_std": 0.06598795019090176,
+ "rewards/length_reward": 0.08626370877027512,
+ "rewards/similarity_reward": 0.5111130028963089,
+ "step": 34
+ },
+ {
+ "completion_length": 224.81319427490234,
+ "epoch": 0.4093567251461988,
+ "grad_norm": 0.7500566248738971,
+ "kl": 0.0406494140625,
+ "learning_rate": 1e-05,
+ "loss": 0.0211,
+ "reward": 0.566271185874939,
+ "reward_std": 0.09086765348911285,
+ "rewards/length_reward": 0.0824175551533699,
+ "rewards/similarity_reward": 0.4838535785675049,
+ "step": 35
+ },
+ {
+ "completion_length": 206.64286041259766,
+ "epoch": 0.42105263157894735,
+ "grad_norm": 0.81166868681819,
+ "kl": 0.048828125,
+ "learning_rate": 1e-05,
+ "loss": -0.0036,
+ "reward": 0.5500784069299698,
+ "reward_std": 0.08153346925973892,
+ "rewards/length_reward": 0.0903845839202404,
+ "rewards/similarity_reward": 0.4596937447786331,
+ "step": 36
+ },
+ {
+ "completion_length": 234.05770111083984,
+ "epoch": 0.4327485380116959,
+ "grad_norm": 0.6394477552036412,
+ "kl": 0.031494140625,
+ "learning_rate": 1e-05,
+ "loss": 0.0208,
+ "reward": 0.6713371574878693,
+ "reward_std": 0.07545671984553337,
+ "rewards/length_reward": 0.08873623237013817,
+ "rewards/similarity_reward": 0.5826008915901184,
+ "step": 37
+ },
+ {
+ "completion_length": 217.49451446533203,
+ "epoch": 0.4444444444444444,
+ "grad_norm": 0.7655211570094593,
+ "kl": 0.035400390625,
+ "learning_rate": 1e-05,
+ "loss": 0.0158,
+ "reward": 0.5549522340297699,
+ "reward_std": 0.10650453716516495,
+ "rewards/length_reward": 0.07774722576141357,
+ "rewards/similarity_reward": 0.47720494866371155,
+ "step": 38
+ },
+ {
+ "completion_length": 229.27748107910156,
+ "epoch": 0.45614035087719296,
+ "grad_norm": 0.6735927375021588,
+ "kl": 0.0333251953125,
+ "learning_rate": 1e-05,
+ "loss": -0.0082,
+ "reward": 0.6093992590904236,
+ "reward_std": 0.07809260673820972,
+ "rewards/length_reward": 0.08379117771983147,
+ "rewards/similarity_reward": 0.5256080627441406,
+ "step": 39
+ },
+ {
+ "completion_length": 226.74176025390625,
+ "epoch": 0.4678362573099415,
+ "grad_norm": 0.6219194225488394,
+ "kl": 0.03179931640625,
+ "learning_rate": 1e-05,
+ "loss": 0.0051,
+ "reward": 0.6532130539417267,
+ "reward_std": 0.07173651456832886,
+ "rewards/length_reward": 0.08104392886161804,
+ "rewards/similarity_reward": 0.5721690654754639,
+ "step": 40
+ },
+ {
+ "completion_length": 218.32418060302734,
+ "epoch": 0.47953216374269003,
+ "grad_norm": 0.6670956759433704,
+ "kl": 0.03472900390625,
+ "learning_rate": 1e-05,
+ "loss": -0.001,
+ "reward": 0.5629624724388123,
+ "reward_std": 0.07206030376255512,
+ "rewards/length_reward": 0.08489007502794266,
+ "rewards/similarity_reward": 0.47807231545448303,
+ "step": 41
+ },
+ {
+ "completion_length": 212.78846740722656,
+ "epoch": 0.49122807017543857,
+ "grad_norm": 0.7704492110860773,
+ "kl": 0.0404052734375,
+ "learning_rate": 1e-05,
+ "loss": 0.0107,
+ "reward": 0.5724318474531174,
+ "reward_std": 0.08364788442850113,
+ "rewards/length_reward": 0.08873622864484787,
+ "rewards/similarity_reward": 0.4836955815553665,
+ "step": 42
+ },
+ {
+ "completion_length": 209.18407440185547,
+ "epoch": 0.5029239766081871,
+ "grad_norm": 0.8817184095556757,
+ "kl": 0.03955078125,
+ "learning_rate": 1e-05,
+ "loss": 0.0029,
+ "reward": 0.5359384119510651,
+ "reward_std": 0.08326387777924538,
+ "rewards/length_reward": 0.08681315928697586,
+ "rewards/similarity_reward": 0.449125200510025,
+ "step": 43
+ },
+ {
+ "completion_length": 207.72528076171875,
+ "epoch": 0.5146198830409356,
+ "grad_norm": 0.7710530359548264,
+ "kl": 0.04833984375,
+ "learning_rate": 1e-05,
+ "loss": -0.0013,
+ "reward": 0.5263623893260956,
+ "reward_std": 0.08040140569210052,
+ "rewards/length_reward": 0.08956040814518929,
+ "rewards/similarity_reward": 0.436801940202713,
+ "step": 44
+ },
+ {
+ "completion_length": 195.5824203491211,
+ "epoch": 0.5263157894736842,
+ "grad_norm": 0.7995200427651461,
+ "kl": 0.0390625,
+ "learning_rate": 1e-05,
+ "loss": -0.0037,
+ "reward": 0.4943057596683502,
+ "reward_std": 0.07873165234923363,
+ "rewards/length_reward": 0.08736260235309601,
+ "rewards/similarity_reward": 0.40694311261177063,
+ "step": 45
+ },
+ {
+ "completion_length": 208.79945373535156,
+ "epoch": 0.5380116959064327,
+ "grad_norm": 0.7592958031791626,
+ "kl": 0.0380859375,
+ "learning_rate": 1e-05,
+ "loss": 0.0094,
+ "reward": 0.5178032070398331,
+ "reward_std": 0.09336737170815468,
+ "rewards/length_reward": 0.08626370131969452,
+ "rewards/similarity_reward": 0.43153949081897736,
+ "step": 46
+ },
+ {
+ "completion_length": 202.56868743896484,
+ "epoch": 0.5497076023391813,
+ "grad_norm": 0.7739746442483526,
+ "kl": 0.0489501953125,
+ "learning_rate": 1e-05,
+ "loss": -0.0013,
+ "reward": 0.5555643737316132,
+ "reward_std": 0.0753365196287632,
+ "rewards/length_reward": 0.08901095390319824,
+ "rewards/similarity_reward": 0.46655333042144775,
+ "step": 47
+ },
+ {
+ "completion_length": 213.90110778808594,
+ "epoch": 0.5614035087719298,
+ "grad_norm": 0.7855391198034375,
+ "kl": 0.0440673828125,
+ "learning_rate": 1e-05,
+ "loss": 0.0221,
+ "reward": 0.5711711943149567,
+ "reward_std": 0.07186082378029823,
+ "rewards/length_reward": 0.08159338310360909,
+ "rewards/similarity_reward": 0.4895777702331543,
+ "step": 48
+ },
+ {
+ "completion_length": 192.1401138305664,
+ "epoch": 0.5730994152046783,
+ "grad_norm": 0.8123115671741554,
+ "kl": 0.04345703125,
+ "learning_rate": 1e-05,
+ "loss": 0.0046,
+ "reward": 0.5523494780063629,
+ "reward_std": 0.0726145338267088,
+ "rewards/length_reward": 0.08818677812814713,
+ "rewards/similarity_reward": 0.4641626626253128,
+ "step": 49
+ },
+ {
+ "completion_length": 235.0659408569336,
+ "epoch": 0.5847953216374269,
+ "grad_norm": 0.6391593806834035,
+ "kl": 0.0355224609375,
+ "learning_rate": 1e-05,
+ "loss": 0.0243,
+ "reward": 0.6290010213851929,
+ "reward_std": 0.07143229246139526,
+ "rewards/length_reward": 0.08653843030333519,
+ "rewards/similarity_reward": 0.5424625873565674,
+ "step": 50
+ },
+ {
+ "completion_length": 227.45330047607422,
+ "epoch": 0.5964912280701754,
+ "grad_norm": 0.6758239263146073,
+ "kl": 0.04052734375,
+ "learning_rate": 1e-05,
+ "loss": -0.0,
+ "reward": 0.5664761662483215,
+ "reward_std": 0.0804094672203064,
+ "rewards/length_reward": 0.08983513340353966,
+ "rewards/similarity_reward": 0.4766409695148468,
+ "step": 51
+ },
+ {
+ "completion_length": 189.59341430664062,
+ "epoch": 0.6081871345029239,
+ "grad_norm": 0.8915084698151584,
+ "kl": 0.08837890625,
+ "learning_rate": 1e-05,
+ "loss": 0.0051,
+ "reward": 0.5132783055305481,
+ "reward_std": 0.07486053928732872,
+ "rewards/length_reward": 0.08956040814518929,
+ "rewards/similarity_reward": 0.42371784150600433,
+ "step": 52
+ },
+ {
+ "completion_length": 224.54396057128906,
+ "epoch": 0.6198830409356725,
+ "grad_norm": 0.6945568764857519,
+ "kl": 0.03656005859375,
+ "learning_rate": 1e-05,
+ "loss": 0.001,
+ "reward": 0.6265865862369537,
+ "reward_std": 0.06371882371604443,
+ "rewards/length_reward": 0.09395601227879524,
+ "rewards/similarity_reward": 0.5326304286718369,
+ "step": 53
+ },
+ {
+ "completion_length": 215.70330047607422,
+ "epoch": 0.631578947368421,
+ "grad_norm": 0.6252063256653906,
+ "kl": 0.03759765625,
+ "learning_rate": 1e-05,
+ "loss": -0.0082,
+ "reward": 0.6269220411777496,
+ "reward_std": 0.08006203919649124,
+ "rewards/length_reward": 0.09175820648670197,
+ "rewards/similarity_reward": 0.5351637601852417,
+ "step": 54
+ },
+ {
+ "completion_length": 239.01649475097656,
+ "epoch": 0.6432748538011696,
+ "grad_norm": 0.570831508947944,
+ "kl": 0.0308837890625,
+ "learning_rate": 1e-05,
+ "loss": 0.0028,
+ "reward": 0.6518236100673676,
+ "reward_std": 0.08433263376355171,
+ "rewards/length_reward": 0.08901095762848854,
+ "rewards/similarity_reward": 0.5628126263618469,
+ "step": 55
+ },
+ {
+ "completion_length": 198.64012145996094,
+ "epoch": 0.6549707602339181,
+ "grad_norm": 0.7836348447961391,
+ "kl": 0.043701171875,
+ "learning_rate": 1e-05,
+ "loss": 0.0133,
+ "reward": 0.546076089143753,
+ "reward_std": 0.08295740559697151,
+ "rewards/length_reward": 0.09368127956986427,
+ "rewards/similarity_reward": 0.4523947238922119,
+ "step": 56
+ },
+ {
+ "completion_length": 217.36264038085938,
+ "epoch": 0.6666666666666666,
+ "grad_norm": 0.7867236430386183,
+ "kl": 0.0673828125,
+ "learning_rate": 1e-05,
+ "loss": 0.0175,
+ "reward": 0.589568018913269,
+ "reward_std": 0.08855083584785461,
+ "rewards/length_reward": 0.08818678185343742,
+ "rewards/similarity_reward": 0.5013811439275742,
+ "step": 57
+ },
+ {
+ "completion_length": 231.38462829589844,
+ "epoch": 0.6783625730994152,
+ "grad_norm": 0.7085500357265208,
+ "kl": 0.04541015625,
+ "learning_rate": 1e-05,
+ "loss": 0.0038,
+ "reward": 0.5928874313831329,
+ "reward_std": 0.07966894656419754,
+ "rewards/length_reward": 0.09230765700340271,
+ "rewards/similarity_reward": 0.500579759478569,
+ "step": 58
+ },
+ {
+ "completion_length": 230.62637329101562,
+ "epoch": 0.6900584795321637,
+ "grad_norm": 0.700881872854234,
+ "kl": 0.04595947265625,
+ "learning_rate": 1e-05,
+ "loss": -0.0039,
+ "reward": 0.5898393988609314,
+ "reward_std": 0.07648670673370361,
+ "rewards/length_reward": 0.08571425452828407,
+ "rewards/similarity_reward": 0.5041251480579376,
+ "step": 59
+ },
+ {
+ "completion_length": 197.88736724853516,
+ "epoch": 0.7017543859649122,
+ "grad_norm": 0.7423270028386794,
+ "kl": 0.0531005859375,
+ "learning_rate": 1e-05,
+ "loss": -0.0012,
+ "reward": 0.5786347389221191,
+ "reward_std": 0.0913703590631485,
+ "rewards/length_reward": 0.08873622864484787,
+ "rewards/similarity_reward": 0.4898984730243683,
+ "step": 60
+ },
+ {
+ "completion_length": 203.12638092041016,
+ "epoch": 0.7134502923976608,
+ "grad_norm": 0.8090290401791991,
+ "kl": 0.054931640625,
+ "learning_rate": 1e-05,
+ "loss": 0.0077,
+ "reward": 0.5577145516872406,
+ "reward_std": 0.10217611491680145,
+ "rewards/length_reward": 0.09258238598704338,
+ "rewards/similarity_reward": 0.46513208746910095,
+ "step": 61
+ },
+ {
+ "completion_length": 197.5302276611328,
+ "epoch": 0.7251461988304093,
+ "grad_norm": 0.792272899719194,
+ "kl": 0.1109619140625,
+ "learning_rate": 1e-05,
+ "loss": -0.0037,
+ "reward": 0.5682541728019714,
+ "reward_std": 0.07418079674243927,
+ "rewards/length_reward": 0.08928568288683891,
+ "rewards/similarity_reward": 0.47896839678287506,
+ "step": 62
+ },
+ {
+ "completion_length": 204.7335205078125,
+ "epoch": 0.7368421052631579,
+ "grad_norm": 7.851854181789403,
+ "kl": 0.7789306640625,
+ "learning_rate": 1e-05,
+ "loss": 0.0075,
+ "reward": 0.5821267366409302,
+ "reward_std": 0.0683053657412529,
+ "rewards/length_reward": 0.08763733133673668,
+ "rewards/similarity_reward": 0.494489386677742,
+ "step": 63
+ },
+ {
+ "completion_length": 189.44780731201172,
+ "epoch": 0.7485380116959064,
+ "grad_norm": 0.8606804023015805,
+ "kl": 0.068603515625,
+ "learning_rate": 1e-05,
+ "loss": -0.0,
+ "reward": 0.5377653539180756,
+ "reward_std": 0.09102616086602211,
+ "rewards/length_reward": 0.08818678185343742,
+ "rewards/similarity_reward": 0.44957853853702545,
+ "step": 64
+ },
+ {
+ "completion_length": 210.21703338623047,
+ "epoch": 0.7602339181286549,
+ "grad_norm": 0.7338561185014666,
+ "kl": 0.0609130859375,
+ "learning_rate": 1e-05,
+ "loss": 0.0058,
+ "reward": 0.5642393827438354,
+ "reward_std": 0.08369087427854538,
+ "rewards/length_reward": 0.08461535349488258,
+ "rewards/similarity_reward": 0.4796240031719208,
+ "step": 65
+ },
+ {
+ "completion_length": 201.94506072998047,
+ "epoch": 0.7719298245614035,
+ "grad_norm": 0.7730879112964497,
+ "kl": 0.07861328125,
+ "learning_rate": 1e-05,
+ "loss": -0.005,
+ "reward": 0.4594819098711014,
+ "reward_std": 0.07900743931531906,
+ "rewards/length_reward": 0.0884615071117878,
+ "rewards/similarity_reward": 0.3710203170776367,
+ "step": 66
+ },
+ {
+ "completion_length": 201.84890747070312,
+ "epoch": 0.783625730994152,
+ "grad_norm": 0.7523583895602961,
+ "kl": 0.065185546875,
+ "learning_rate": 1e-05,
+ "loss": -0.0015,
+ "reward": 0.5604384243488312,
+ "reward_std": 0.07452090084552765,
+ "rewards/length_reward": 0.08873622491955757,
+ "rewards/similarity_reward": 0.47170214354991913,
+ "step": 67
+ },
+ {
+ "completion_length": 223.24451446533203,
+ "epoch": 0.7953216374269005,
+ "grad_norm": 0.6362395602468862,
+ "kl": 0.058349609375,
+ "learning_rate": 1e-05,
+ "loss": -0.0002,
+ "reward": 0.6040938794612885,
+ "reward_std": 0.07155320793390274,
+ "rewards/length_reward": 0.0903845801949501,
+ "rewards/similarity_reward": 0.5137092769145966,
+ "step": 68
+ },
+ {
+ "completion_length": 207.6703338623047,
+ "epoch": 0.8070175438596491,
+ "grad_norm": 0.7137389331917928,
+ "kl": 0.0574951171875,
+ "learning_rate": 1e-05,
+ "loss": 0.0091,
+ "reward": 0.5744452476501465,
+ "reward_std": 0.06715035997331142,
+ "rewards/length_reward": 0.08763733133673668,
+ "rewards/similarity_reward": 0.4868078976869583,
+ "step": 69
+ },
+ {
+ "completion_length": 216.75550079345703,
+ "epoch": 0.8187134502923976,
+ "grad_norm": 0.657146144923999,
+ "kl": 0.0546875,
+ "learning_rate": 1e-05,
+ "loss": 0.0117,
+ "reward": 0.6223109066486359,
+ "reward_std": 0.06596036069095135,
+ "rewards/length_reward": 0.09313183277845383,
+ "rewards/similarity_reward": 0.529179036617279,
+ "step": 70
+ },
+ {
+ "completion_length": 199.43682098388672,
+ "epoch": 0.8304093567251462,
+ "grad_norm": 0.7497595209043092,
+ "kl": 0.0567626953125,
+ "learning_rate": 1e-05,
+ "loss": 0.0084,
+ "reward": 0.5526998043060303,
+ "reward_std": 0.0681044515222311,
+ "rewards/length_reward": 0.09203293174505234,
+ "rewards/similarity_reward": 0.46066682040691376,
+ "step": 71
+ },
+ {
+ "completion_length": 194.8379135131836,
+ "epoch": 0.8421052631578947,
+ "grad_norm": 0.8548978780480045,
+ "kl": 0.092529296875,
+ "learning_rate": 1e-05,
+ "loss": -0.003,
+ "reward": 0.4842057079076767,
+ "reward_std": 0.07755878753960133,
+ "rewards/length_reward": 0.08818677812814713,
+ "rewards/similarity_reward": 0.3960189074277878,
+ "step": 72
+ },
+ {
+ "completion_length": 219.27747344970703,
+ "epoch": 0.8538011695906432,
+ "grad_norm": 0.6787293206534569,
+ "kl": 0.060546875,
+ "learning_rate": 1e-05,
+ "loss": 0.0001,
+ "reward": 0.6061641573905945,
+ "reward_std": 0.06255521811544895,
+ "rewards/length_reward": 0.08818677812814713,
+ "rewards/similarity_reward": 0.517977312207222,
+ "step": 73
+ },
+ {
+ "completion_length": 227.14012145996094,
+ "epoch": 0.8654970760233918,
+ "grad_norm": 0.6689199959869702,
+ "kl": 0.0540771484375,
+ "learning_rate": 1e-05,
+ "loss": -0.0008,
+ "reward": 0.6462463140487671,
+ "reward_std": 0.07359376363456249,
+ "rewards/length_reward": 0.08379117771983147,
+ "rewards/similarity_reward": 0.5624551177024841,
+ "step": 74
+ },
+ {
+ "completion_length": 228.5192413330078,
+ "epoch": 0.8771929824561403,
+ "grad_norm": 0.7718934050543206,
+ "kl": 0.0638427734375,
+ "learning_rate": 1e-05,
+ "loss": 0.0198,
+ "reward": 0.5483137369155884,
+ "reward_std": 0.08430877327919006,
+ "rewards/length_reward": 0.0873626060783863,
+ "rewards/similarity_reward": 0.4609510749578476,
+ "step": 75
+ },
+ {
+ "completion_length": 190.56868743896484,
+ "epoch": 0.8888888888888888,
+ "grad_norm": 0.756393678015688,
+ "kl": 0.064453125,
+ "learning_rate": 1e-05,
+ "loss": -0.0028,
+ "reward": 0.6397236287593842,
+ "reward_std": 0.07035822048783302,
+ "rewards/length_reward": 0.08489008247852325,
+ "rewards/similarity_reward": 0.5548335015773773,
+ "step": 76
+ },
+ {
+ "completion_length": 207.6291275024414,
+ "epoch": 0.9005847953216374,
+ "grad_norm": 0.6890941542559051,
+ "kl": 0.0548095703125,
+ "learning_rate": 1e-05,
+ "loss": -0.0147,
+ "reward": 0.590265542268753,
+ "reward_std": 0.09322262555360794,
+ "rewards/length_reward": 0.08516480773687363,
+ "rewards/similarity_reward": 0.5051007270812988,
+ "step": 77
+ },
+ {
+ "completion_length": 202.84616088867188,
+ "epoch": 0.9122807017543859,
+ "grad_norm": 0.805754221114796,
+ "kl": 0.0643310546875,
+ "learning_rate": 1e-05,
+ "loss": 0.0108,
+ "reward": 0.5014786124229431,
+ "reward_std": 0.060309527441859245,
+ "rewards/length_reward": 0.08983512967824936,
+ "rewards/similarity_reward": 0.41164346039295197,
+ "step": 78
+ },
+ {
+ "completion_length": 207.18132781982422,
+ "epoch": 0.9239766081871345,
+ "grad_norm": 0.7250229184858834,
+ "kl": 0.050048828125,
+ "learning_rate": 1e-05,
+ "loss": 0.0076,
+ "reward": 0.5822539925575256,
+ "reward_std": 0.07761426270008087,
+ "rewards/length_reward": 0.08763733133673668,
+ "rewards/similarity_reward": 0.4946165978908539,
+ "step": 79
+ },
+ {
+ "completion_length": 203.4258270263672,
+ "epoch": 0.935672514619883,
+ "grad_norm": 0.7602294741498272,
+ "kl": 0.066650390625,
+ "learning_rate": 1e-05,
+ "loss": 0.0075,
+ "reward": 0.5682046115398407,
+ "reward_std": 0.07631215453147888,
+ "rewards/length_reward": 0.09560435637831688,
+ "rewards/similarity_reward": 0.47260017693042755,
+ "step": 80
+ },
+ {
+ "completion_length": 201.92858123779297,
+ "epoch": 0.9473684210526315,
+ "grad_norm": 0.7505038725915468,
+ "kl": 0.0601806640625,
+ "learning_rate": 1e-05,
+ "loss": 0.0051,
+ "reward": 0.5681571960449219,
+ "reward_std": 0.053876254707574844,
+ "rewards/length_reward": 0.09587908536195755,
+ "rewards/similarity_reward": 0.47227810323238373,
+ "step": 81
+ },
+ {
+ "completion_length": 219.4203338623047,
+ "epoch": 0.9590643274853801,
+ "grad_norm": 0.6591447000701826,
+ "kl": 0.05517578125,
+ "learning_rate": 1e-05,
+ "loss": 0.0134,
+ "reward": 0.5683076977729797,
+ "reward_std": 0.07923416420817375,
+ "rewards/length_reward": 0.08983513340353966,
+ "rewards/similarity_reward": 0.4784725159406662,
+ "step": 82
+ },
+ {
+ "completion_length": 210.14835357666016,
+ "epoch": 0.9707602339181286,
+ "grad_norm": 0.6978862200398336,
+ "kl": 0.0567626953125,
+ "learning_rate": 1e-05,
+ "loss": 0.0041,
+ "reward": 0.5853509902954102,
+ "reward_std": 0.07063767686486244,
+ "rewards/length_reward": 0.09505491331219673,
+ "rewards/similarity_reward": 0.49029606580734253,
+ "step": 83
+ },
+ {
+ "completion_length": 219.5412139892578,
+ "epoch": 0.9824561403508771,
+ "grad_norm": 0.6559941104194319,
+ "kl": 0.0487060546875,
+ "learning_rate": 1e-05,
+ "loss": 0.0029,
+ "reward": 0.6524507999420166,
+ "reward_std": 0.07049693912267685,
+ "rewards/length_reward": 0.08818677812814713,
+ "rewards/similarity_reward": 0.5642639398574829,
+ "step": 84
+ },
+ {
+ "completion_length": 209.51099395751953,
+ "epoch": 0.9941520467836257,
+ "grad_norm": 0.7849305028391962,
+ "kl": 0.072021484375,
+ "learning_rate": 1e-05,
+ "loss": -0.0007,
+ "reward": 0.541468620300293,
+ "reward_std": 0.08621817082166672,
+ "rewards/length_reward": 0.09285711124539375,
+ "rewards/similarity_reward": 0.44861146807670593,
+ "step": 85
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 85,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 1,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 0.0,
+ "train_batch_size": 26,
+ "trial_name": null,
+ "trial_params": null
+ }
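The `log_history` above is an RL-style fine-tuning log: per step it records what appears to be a KL term against a reference policy, the mean total reward, and its two components (`length_reward` and `similarity_reward`). The run is stable apart from an isolated spike at step 63 (grad_norm ≈ 7.85, kl ≈ 0.78). A sketch for pulling the reward curve out of the file (local path assumed):

```python
import json

# Path is illustrative; point it at the downloaded trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

# Each entry logs the step's mean reward plus its two components.
for entry in state["log_history"]:
    print(
        f"step {entry['step']:>2}: "
        f"reward={entry['reward']:.3f} "
        f"(length={entry['rewards/length_reward']:.3f}, "
        f"similarity={entry['rewards/similarity_reward']:.3f}, "
        f"kl={entry['kl']:.4f})"
    )
```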
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03d0111fd8a293c856ba42e3b75f2eb8db3853a1e7d4202b6cd7528954fd3ca9
+ size 7864
vocab.json ADDED
The diff for this file is too large to render. See raw diff