JW17 committed on
Commit 7ba716b
1 Parent(s): 51653a5

Model save

README.md ADDED
@@ -0,0 +1,59 @@
+ ---
+ license: apache-2.0
+ base_model: mistralai/Mistral-7B-v0.1
+ tags:
+ - trl
+ - orpo
+ - generated_from_trainer
+ model-index:
+ - name: kaist-mistral-orpo-OHP-15k-Mathcode
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # kaist-mistral-orpo-OHP-15k-Mathcode
+
+ This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-06
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 64
+ - total_eval_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - num_epochs: 3
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.39.3
+ - Pytorch 2.1.2.post303
+ - Datasets 2.18.0
+ - Tokenizers 0.15.2
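
The card above leaves its usage sections empty, so here is a minimal loading sketch (not part of the commit) using the standard `transformers` API. The repository id is an assumption pieced together from the committer name and the model name in the card, and the dtype and `device_map` choices are likewise illustrative.

```python
# Minimal usage sketch; the repo id below is assumed, not stated in the card.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "JW17/kaist-mistral-orpo-OHP-15k-Mathcode"  # hypothetical id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,  # the shards total ~14.5 GB, consistent with 16-bit weights
    device_map="auto",           # requires accelerate
)

prompt = "Write a Python function that returns the n-th Fibonacci number."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=256)  # stops at eos_token_id 2 from generation_config.json
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```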
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 3.0,
+ "train_loss": 0.3926089354220279,
+ "train_runtime": 9824.6416,
+ "train_samples": 14963,
+ "train_samples_per_second": 4.569,
+ "train_steps_per_second": 0.071
+ }
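
Not part of the commit, but a quick consistency check: the throughput figures above line up with the hyperparameters in the README (effective batch size 64) and the 702 optimizer steps recorded in `trainer_state.json`.

```python
# Sanity check of the reported throughput, using only the numbers above.
train_samples, epochs, runtime_s = 14963, 3.0, 9824.6416
samples_per_s = train_samples * epochs / runtime_s  # ~4.57, matches 4.569
steps_per_s = samples_per_s / 64                    # effective batch size 64 -> ~0.071
print(round(samples_per_s, 3), round(steps_per_s, 3))
```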
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "transformers_version": "4.39.3"
+ }
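
For reference (not part of the commit), these defaults are what `transformers.GenerationConfig` reads back; a minimal sketch, reusing the hypothetical repository id from the usage example above:

```python
# Sketch: the saved generation defaults as transformers sees them.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("JW17/kaist-mistral-orpo-OHP-15k-Mathcode")  # hypothetical id
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id)  # 1 2, so generate() stops at token id 2
```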
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:530727d17679ed1bb28a8868fea4ac21785730dcbed50b4b36ff0bdbace38968
+ size 4943162336
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:481d3979e9e98525e3aa070ae715ec0fea335e00706dab3989e4e93926b622a7
+ size 4999819336
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a049612c9c82fff6b809d90893ce0fd8e7aa48d221dac6db6dfb2c2bcf8c0068
+ size 4540516344
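
The three `.safetensors` entries above are Git LFS pointer files rather than the weights themselves: each records the SHA-256 and byte size of the real shard. A minimal sketch (not part of the commit) of checking locally downloaded shards against those pointers:

```python
# Sketch: verify downloaded shards against the SHA-256 oids in the LFS pointers above.
import hashlib
import os

expected = {
    "model-00001-of-00003.safetensors": ("530727d17679ed1bb28a8868fea4ac21785730dcbed50b4b36ff0bdbace38968", 4943162336),
    "model-00002-of-00003.safetensors": ("481d3979e9e98525e3aa070ae715ec0fea335e00706dab3989e4e93926b622a7", 4999819336),
    "model-00003-of-00003.safetensors": ("a049612c9c82fff6b809d90893ce0fd8e7aa48d221dac6db6dfb2c2bcf8c0068", 4540516344),
}

for name, (oid, size) in expected.items():
    h = hashlib.sha256()
    with open(name, "rb") as f:  # assumes the shard was already downloaded to the current directory
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    ok = h.hexdigest() == oid and os.path.getsize(name) == size
    print(name, "OK" if ok else "MISMATCH")
```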
model.safetensors.index.json ADDED
@@ -0,0 +1,298 @@
+ {
+ "metadata": {
+ "total_size": 14483464192
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00003-of-00003.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.norm.weight": "model-00003-of-00003.safetensors"
+ }
+ }
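
The index above routes every tensor to one of the three shards (roughly: the embeddings and layers 0-10 in shard 1, layers 10-22 in shard 2, and layers 22-31 plus `lm_head` and the final norm in shard 3), with `total_size` giving the combined tensor payload in bytes. A small sketch (not part of the commit) of inspecting it, assuming the index file has been downloaded locally:

```python
# Sketch: inspect which tensors live in which shard and read the declared total size.
import json
from collections import Counter

with open("model.safetensors.index.json") as f:  # assumes a local copy of the index
    index = json.load(f)

print(index["metadata"]["total_size"])        # 14483464192 bytes, ~14.5 GB of weights
print(Counter(index["weight_map"].values()))  # tensor count per shard file
```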
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 3.0,
+ "train_loss": 0.3926089354220279,
+ "train_runtime": 9824.6416,
+ "train_samples": 14963,
+ "train_samples_per_second": 4.569,
+ "train_steps_per_second": 0.071
+ }
trainer_state.json ADDED
@@ -0,0 +1,1290 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 3.0,
5
+ "eval_steps": 500,
6
+ "global_step": 702,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.04,
13
+ "grad_norm": 3.8125,
14
+ "learning_rate": 4.997496987311431e-06,
15
+ "log_odds_chosen": 0.16457681357860565,
16
+ "log_odds_ratio": -0.6622492671012878,
17
+ "logits/chosen": -2.9562315940856934,
18
+ "logits/rejected": -2.943472385406494,
19
+ "logps/chosen": -0.6016947031021118,
20
+ "logps/rejected": -0.675441563129425,
21
+ "loss": 0.9961,
22
+ "nll_loss": 0.7968690991401672,
23
+ "rewards/accuracies": 0.6000000238418579,
24
+ "rewards/chosen": -0.12033893167972565,
25
+ "rewards/margins": 0.014749372377991676,
26
+ "rewards/rejected": -0.13508832454681396,
27
+ "step": 10
28
+ },
29
+ {
30
+ "epoch": 0.09,
31
+ "grad_norm": 3.5,
32
+ "learning_rate": 4.989992961303738e-06,
33
+ "log_odds_chosen": 0.09340940415859222,
34
+ "log_odds_ratio": -0.6970354914665222,
35
+ "logits/chosen": -3.0346553325653076,
36
+ "logits/rejected": -3.047511577606201,
37
+ "logps/chosen": -0.5685297846794128,
38
+ "logps/rejected": -0.6139942407608032,
39
+ "loss": 0.4901,
40
+ "nll_loss": 0.35993438959121704,
41
+ "rewards/accuracies": 0.5249999761581421,
42
+ "rewards/chosen": -0.11370595544576645,
43
+ "rewards/margins": 0.00909288041293621,
44
+ "rewards/rejected": -0.1227988451719284,
45
+ "step": 20
46
+ },
47
+ {
48
+ "epoch": 0.13,
49
+ "grad_norm": 3.703125,
50
+ "learning_rate": 4.977502948114772e-06,
51
+ "log_odds_chosen": 0.06927172094583511,
52
+ "log_odds_ratio": -0.7004188895225525,
53
+ "logits/chosen": -3.142512798309326,
54
+ "logits/rejected": -3.1354269981384277,
55
+ "logps/chosen": -0.5674165487289429,
56
+ "logps/rejected": -0.590361475944519,
57
+ "loss": 0.4797,
58
+ "nll_loss": 0.3510151505470276,
59
+ "rewards/accuracies": 0.581250011920929,
60
+ "rewards/chosen": -0.11348330974578857,
61
+ "rewards/margins": 0.00458899512887001,
62
+ "rewards/rejected": -0.11807229369878769,
63
+ "step": 30
64
+ },
65
+ {
66
+ "epoch": 0.17,
67
+ "grad_norm": 3.484375,
68
+ "learning_rate": 4.960051957873726e-06,
69
+ "log_odds_chosen": 0.049079131335020065,
70
+ "log_odds_ratio": -0.709546685218811,
71
+ "logits/chosen": -3.2224674224853516,
72
+ "logits/rejected": -3.239351749420166,
73
+ "logps/chosen": -0.564799427986145,
74
+ "logps/rejected": -0.5826238393783569,
75
+ "loss": 0.4883,
76
+ "nll_loss": 0.3356490135192871,
77
+ "rewards/accuracies": 0.53125,
78
+ "rewards/chosen": -0.11295988410711288,
79
+ "rewards/margins": 0.003564885351806879,
80
+ "rewards/rejected": -0.11652477085590363,
81
+ "step": 40
82
+ },
83
+ {
84
+ "epoch": 0.21,
85
+ "grad_norm": 3.59375,
86
+ "learning_rate": 4.937674934620601e-06,
87
+ "log_odds_chosen": 0.1465633362531662,
88
+ "log_odds_ratio": -0.6634026169776917,
89
+ "logits/chosen": -3.2599196434020996,
90
+ "logits/rejected": -3.257382869720459,
91
+ "logps/chosen": -0.5410924553871155,
92
+ "logps/rejected": -0.6060024499893188,
93
+ "loss": 0.4577,
94
+ "nll_loss": 0.3106100261211395,
95
+ "rewards/accuracies": 0.6187499761581421,
96
+ "rewards/chosen": -0.10821850597858429,
97
+ "rewards/margins": 0.012981968931853771,
98
+ "rewards/rejected": -0.12120047956705093,
99
+ "step": 50
100
+ },
101
+ {
102
+ "epoch": 0.26,
103
+ "grad_norm": 3.234375,
104
+ "learning_rate": 4.9104166863339065e-06,
105
+ "log_odds_chosen": 0.061258088797330856,
106
+ "log_odds_ratio": -0.7155165076255798,
107
+ "logits/chosen": -3.3071017265319824,
108
+ "logits/rejected": -3.3195369243621826,
109
+ "logps/chosen": -0.6092182397842407,
110
+ "logps/rejected": -0.6360430121421814,
111
+ "loss": 0.494,
112
+ "nll_loss": 0.3727017939090729,
113
+ "rewards/accuracies": 0.5687500238418579,
114
+ "rewards/chosen": -0.12184363603591919,
115
+ "rewards/margins": 0.005364974495023489,
116
+ "rewards/rejected": -0.1272086203098297,
117
+ "step": 60
118
+ },
119
+ {
120
+ "epoch": 0.3,
121
+ "grad_norm": 3.28125,
122
+ "learning_rate": 4.878331795206705e-06,
123
+ "log_odds_chosen": 0.09167635440826416,
124
+ "log_odds_ratio": -0.7004056572914124,
125
+ "logits/chosen": -3.2954325675964355,
126
+ "logits/rejected": -3.311220169067383,
127
+ "logps/chosen": -0.5609530210494995,
128
+ "logps/rejected": -0.6090016961097717,
129
+ "loss": 0.4762,
130
+ "nll_loss": 0.31441202759742737,
131
+ "rewards/accuracies": 0.5562499761581421,
132
+ "rewards/chosen": -0.1121906191110611,
133
+ "rewards/margins": 0.009609726257622242,
134
+ "rewards/rejected": -0.12180032581090927,
135
+ "step": 70
136
+ },
137
+ {
138
+ "epoch": 0.34,
139
+ "grad_norm": 3.40625,
140
+ "learning_rate": 4.841484508350679e-06,
141
+ "log_odds_chosen": 0.1308586597442627,
142
+ "log_odds_ratio": -0.6873519420623779,
143
+ "logits/chosen": -3.2609565258026123,
144
+ "logits/rejected": -3.302786350250244,
145
+ "logps/chosen": -0.5968475937843323,
146
+ "logps/rejected": -0.6581549644470215,
147
+ "loss": 0.4895,
148
+ "nll_loss": 0.36970850825309753,
149
+ "rewards/accuracies": 0.5874999761581421,
150
+ "rewards/chosen": -0.1193695068359375,
151
+ "rewards/margins": 0.012261486612260342,
152
+ "rewards/rejected": -0.13163098692893982,
153
+ "step": 80
154
+ },
155
+ {
156
+ "epoch": 0.38,
157
+ "grad_norm": 3.78125,
158
+ "learning_rate": 4.799948609147061e-06,
159
+ "log_odds_chosen": 0.06979366391897202,
160
+ "log_odds_ratio": -0.7083112001419067,
161
+ "logits/chosen": -3.289924144744873,
162
+ "logits/rejected": -3.326308012008667,
163
+ "logps/chosen": -0.6024026274681091,
164
+ "logps/rejected": -0.6323495507240295,
165
+ "loss": 0.4868,
166
+ "nll_loss": 0.376103937625885,
167
+ "rewards/accuracies": 0.581250011920929,
168
+ "rewards/chosen": -0.12048052251338959,
169
+ "rewards/margins": 0.005989375524222851,
170
+ "rewards/rejected": -0.12646988034248352,
171
+ "step": 90
172
+ },
173
+ {
174
+ "epoch": 0.43,
175
+ "grad_norm": 3.4375,
176
+ "learning_rate": 4.753807269502041e-06,
177
+ "log_odds_chosen": 0.12073998153209686,
178
+ "log_odds_ratio": -0.6814596652984619,
179
+ "logits/chosen": -3.2658774852752686,
180
+ "logits/rejected": -3.276677370071411,
181
+ "logps/chosen": -0.5358774662017822,
182
+ "logps/rejected": -0.5820270776748657,
183
+ "loss": 0.4686,
184
+ "nll_loss": 0.3315598964691162,
185
+ "rewards/accuracies": 0.550000011920929,
186
+ "rewards/chosen": -0.10717550665140152,
187
+ "rewards/margins": 0.009229922667145729,
188
+ "rewards/rejected": -0.1164054125547409,
189
+ "step": 100
190
+ },
191
+ {
192
+ "epoch": 0.47,
193
+ "grad_norm": 3.140625,
194
+ "learning_rate": 4.703152883302498e-06,
195
+ "log_odds_chosen": 0.12317148596048355,
196
+ "log_odds_ratio": -0.6822798848152161,
197
+ "logits/chosen": -3.279986619949341,
198
+ "logits/rejected": -3.3097903728485107,
199
+ "logps/chosen": -0.5460432171821594,
200
+ "logps/rejected": -0.6105918884277344,
201
+ "loss": 0.4745,
202
+ "nll_loss": 0.3556433618068695,
203
+ "rewards/accuracies": 0.5625,
204
+ "rewards/chosen": -0.10920864343643188,
205
+ "rewards/margins": 0.01290974486619234,
206
+ "rewards/rejected": -0.12211839854717255,
207
+ "step": 110
208
+ },
209
+ {
210
+ "epoch": 0.51,
211
+ "grad_norm": 3.03125,
212
+ "learning_rate": 4.648086881405542e-06,
213
+ "log_odds_chosen": 0.11489371955394745,
214
+ "log_odds_ratio": -0.689535915851593,
215
+ "logits/chosen": -3.255175828933716,
216
+ "logits/rejected": -3.275517225265503,
217
+ "logps/chosen": -0.5755141377449036,
218
+ "logps/rejected": -0.6235055923461914,
219
+ "loss": 0.475,
220
+ "nll_loss": 0.32820188999176025,
221
+ "rewards/accuracies": 0.581250011920929,
222
+ "rewards/chosen": -0.11510282754898071,
223
+ "rewards/margins": 0.00959828682243824,
224
+ "rewards/rejected": -0.1247011050581932,
225
+ "step": 120
226
+ },
227
+ {
228
+ "epoch": 0.56,
229
+ "grad_norm": 2.890625,
230
+ "learning_rate": 4.588719528532342e-06,
231
+ "log_odds_chosen": 0.09126593172550201,
232
+ "log_odds_ratio": -0.697300910949707,
233
+ "logits/chosen": -3.2629482746124268,
234
+ "logits/rejected": -3.2472147941589355,
235
+ "logps/chosen": -0.5701687335968018,
236
+ "logps/rejected": -0.618691623210907,
237
+ "loss": 0.4766,
238
+ "nll_loss": 0.3278309404850006,
239
+ "rewards/accuracies": 0.5249999761581421,
240
+ "rewards/chosen": -0.1140337586402893,
241
+ "rewards/margins": 0.00970458984375,
242
+ "rewards/rejected": -0.12373832613229752,
243
+ "step": 130
244
+ },
245
+ {
246
+ "epoch": 0.6,
247
+ "grad_norm": 3.234375,
248
+ "learning_rate": 4.525169702472917e-06,
249
+ "log_odds_chosen": 0.055292803794145584,
250
+ "log_odds_ratio": -0.7131302356719971,
251
+ "logits/chosen": -3.1946372985839844,
252
+ "logits/rejected": -3.208055019378662,
253
+ "logps/chosen": -0.5659917593002319,
254
+ "logps/rejected": -0.5857821702957153,
255
+ "loss": 0.4741,
256
+ "nll_loss": 0.32717862725257874,
257
+ "rewards/accuracies": 0.512499988079071,
258
+ "rewards/chosen": -0.11319835484027863,
259
+ "rewards/margins": 0.003958091139793396,
260
+ "rewards/rejected": -0.11715643107891083,
261
+ "step": 140
262
+ },
263
+ {
264
+ "epoch": 0.64,
265
+ "grad_norm": 3.28125,
266
+ "learning_rate": 4.457564656044056e-06,
267
+ "log_odds_chosen": 0.10363437980413437,
268
+ "log_odds_ratio": -0.6924600005149841,
269
+ "logits/chosen": -3.227095127105713,
270
+ "logits/rejected": -3.269847869873047,
271
+ "logps/chosen": -0.540108859539032,
272
+ "logps/rejected": -0.5899955034255981,
273
+ "loss": 0.4874,
274
+ "nll_loss": 0.3099691569805145,
275
+ "rewards/accuracies": 0.6000000238418579,
276
+ "rewards/chosen": -0.10802175849676132,
277
+ "rewards/margins": 0.009977328591048717,
278
+ "rewards/rejected": -0.11799909919500351,
279
+ "step": 150
280
+ },
281
+ {
282
+ "epoch": 0.68,
283
+ "grad_norm": 3.25,
284
+ "learning_rate": 4.386039762276976e-06,
285
+ "log_odds_chosen": 0.12246614694595337,
286
+ "log_odds_ratio": -0.6819506883621216,
287
+ "logits/chosen": -3.1787781715393066,
288
+ "logits/rejected": -3.1843771934509277,
289
+ "logps/chosen": -0.556765615940094,
290
+ "logps/rejected": -0.6073147058486938,
291
+ "loss": 0.4718,
292
+ "nll_loss": 0.34596166014671326,
293
+ "rewards/accuracies": 0.59375,
294
+ "rewards/chosen": -0.11135313659906387,
295
+ "rewards/margins": 0.010109812021255493,
296
+ "rewards/rejected": -0.12146292626857758,
297
+ "step": 160
298
+ },
299
+ {
300
+ "epoch": 0.73,
301
+ "grad_norm": 3.609375,
302
+ "learning_rate": 4.310738243344996e-06,
303
+ "log_odds_chosen": 0.09136945009231567,
304
+ "log_odds_ratio": -0.6929733753204346,
305
+ "logits/chosen": -3.207054853439331,
306
+ "logits/rejected": -3.225612163543701,
307
+ "logps/chosen": -0.5217511653900146,
308
+ "logps/rejected": -0.5743865966796875,
309
+ "loss": 0.4775,
310
+ "nll_loss": 0.34531891345977783,
311
+ "rewards/accuracies": 0.550000011920929,
312
+ "rewards/chosen": -0.10435023158788681,
313
+ "rewards/margins": 0.010527082718908787,
314
+ "rewards/rejected": -0.11487732082605362,
315
+ "step": 170
316
+ },
317
+ {
318
+ "epoch": 0.77,
319
+ "grad_norm": 3.21875,
320
+ "learning_rate": 4.231810883773999e-06,
321
+ "log_odds_chosen": 0.13569065928459167,
322
+ "log_odds_ratio": -0.6776682138442993,
323
+ "logits/chosen": -3.176665782928467,
324
+ "logits/rejected": -3.197835922241211,
325
+ "logps/chosen": -0.5234608054161072,
326
+ "logps/rejected": -0.5825861692428589,
327
+ "loss": 0.4797,
328
+ "nll_loss": 0.358028769493103,
329
+ "rewards/accuracies": 0.5687500238418579,
330
+ "rewards/chosen": -0.10469217598438263,
331
+ "rewards/margins": 0.011825057677924633,
332
+ "rewards/rejected": -0.11651722341775894,
333
+ "step": 180
334
+ },
335
+ {
336
+ "epoch": 0.81,
337
+ "grad_norm": 2.96875,
338
+ "learning_rate": 4.149415728509971e-06,
339
+ "log_odds_chosen": 0.11225181818008423,
340
+ "log_odds_ratio": -0.6859180331230164,
341
+ "logits/chosen": -3.2352004051208496,
342
+ "logits/rejected": -3.2568259239196777,
343
+ "logps/chosen": -0.5570780634880066,
344
+ "logps/rejected": -0.6075831651687622,
345
+ "loss": 0.4563,
346
+ "nll_loss": 0.33723753690719604,
347
+ "rewards/accuracies": 0.5687500238418579,
348
+ "rewards/chosen": -0.11141560971736908,
349
+ "rewards/margins": 0.010101011954247952,
350
+ "rewards/rejected": -0.1215166226029396,
351
+ "step": 190
352
+ },
353
+ {
354
+ "epoch": 0.85,
355
+ "grad_norm": 3.09375,
356
+ "learning_rate": 4.063717766448194e-06,
357
+ "log_odds_chosen": 0.09286753088235855,
358
+ "log_odds_ratio": -0.6982024908065796,
359
+ "logits/chosen": -3.177328109741211,
360
+ "logits/rejected": -3.204103946685791,
361
+ "logps/chosen": -0.5489121675491333,
362
+ "logps/rejected": -0.5975584387779236,
363
+ "loss": 0.4871,
364
+ "nll_loss": 0.36433055996894836,
365
+ "rewards/accuracies": 0.543749988079071,
366
+ "rewards/chosen": -0.10978243499994278,
367
+ "rewards/margins": 0.009729253128170967,
368
+ "rewards/rejected": -0.1195116862654686,
369
+ "step": 200
370
+ },
371
+ {
372
+ "epoch": 0.9,
373
+ "grad_norm": 3.234375,
374
+ "learning_rate": 3.974888600057808e-06,
375
+ "log_odds_chosen": 0.06380079686641693,
376
+ "log_odds_ratio": -0.7078055143356323,
377
+ "logits/chosen": -3.179089307785034,
378
+ "logits/rejected": -3.1974921226501465,
379
+ "logps/chosen": -0.5259910821914673,
380
+ "logps/rejected": -0.5546124577522278,
381
+ "loss": 0.4842,
382
+ "nll_loss": 0.3254118859767914,
383
+ "rewards/accuracies": 0.53125,
384
+ "rewards/chosen": -0.1051982045173645,
385
+ "rewards/margins": 0.0057242861948907375,
386
+ "rewards/rejected": -0.11092247813940048,
387
+ "step": 210
388
+ },
389
+ {
390
+ "epoch": 0.94,
391
+ "grad_norm": 3.1875,
392
+ "learning_rate": 3.883106101763285e-06,
393
+ "log_odds_chosen": 0.07409624010324478,
394
+ "log_odds_ratio": -0.7109658122062683,
395
+ "logits/chosen": -3.2040557861328125,
396
+ "logits/rejected": -3.232853651046753,
397
+ "logps/chosen": -0.5239503979682922,
398
+ "logps/rejected": -0.5682771801948547,
399
+ "loss": 0.4671,
400
+ "nll_loss": 0.32592883706092834,
401
+ "rewards/accuracies": 0.518750011920929,
402
+ "rewards/chosen": -0.10479007661342621,
403
+ "rewards/margins": 0.008865350857377052,
404
+ "rewards/rejected": -0.1136554479598999,
405
+ "step": 220
406
+ },
407
+ {
408
+ "epoch": 0.98,
409
+ "grad_norm": 3.375,
410
+ "learning_rate": 3.7885540577708806e-06,
411
+ "log_odds_chosen": 0.17323985695838928,
412
+ "log_odds_ratio": -0.6644941568374634,
413
+ "logits/chosen": -3.242084503173828,
414
+ "logits/rejected": -3.256577730178833,
415
+ "logps/chosen": -0.5062168836593628,
416
+ "logps/rejected": -0.5952838063240051,
417
+ "loss": 0.4719,
418
+ "nll_loss": 0.31021052598953247,
419
+ "rewards/accuracies": 0.606249988079071,
420
+ "rewards/chosen": -0.10124337673187256,
421
+ "rewards/margins": 0.01781339757144451,
422
+ "rewards/rejected": -0.11905677616596222,
423
+ "step": 230
424
+ },
425
+ {
426
+ "epoch": 1.03,
427
+ "grad_norm": 3.84375,
428
+ "learning_rate": 3.6914218000532697e-06,
429
+ "log_odds_chosen": 0.369870126247406,
430
+ "log_odds_ratio": -0.5746415257453918,
431
+ "logits/chosen": -3.2143750190734863,
432
+ "logits/rejected": -3.2405693531036377,
433
+ "logps/chosen": -0.4556041657924652,
434
+ "logps/rejected": -0.6035604476928711,
435
+ "loss": 0.4236,
436
+ "nll_loss": 0.2927762269973755,
437
+ "rewards/accuracies": 0.6937500238418579,
438
+ "rewards/chosen": -0.09112082421779633,
439
+ "rewards/margins": 0.029591253027319908,
440
+ "rewards/rejected": -0.12071208655834198,
441
+ "step": 240
442
+ },
443
+ {
444
+ "epoch": 1.07,
445
+ "grad_norm": 3.8125,
446
+ "learning_rate": 3.5919038272292824e-06,
447
+ "log_odds_chosen": 0.6750356554985046,
448
+ "log_odds_ratio": -0.4590001702308655,
449
+ "logits/chosen": -3.1941792964935303,
450
+ "logits/rejected": -3.229973554611206,
451
+ "logps/chosen": -0.41714033484458923,
452
+ "logps/rejected": -0.676815390586853,
453
+ "loss": 0.3754,
454
+ "nll_loss": 0.2869933545589447,
455
+ "rewards/accuracies": 0.8374999761581421,
456
+ "rewards/chosen": -0.08342806994915009,
457
+ "rewards/margins": 0.05193500965833664,
458
+ "rewards/rejected": -0.13536307215690613,
459
+ "step": 250
460
+ },
461
+ {
462
+ "epoch": 1.11,
463
+ "grad_norm": 4.0,
464
+ "learning_rate": 3.4901994150978926e-06,
465
+ "log_odds_chosen": 0.7411336302757263,
466
+ "log_odds_ratio": -0.43500590324401855,
467
+ "logits/chosen": -3.248504161834717,
468
+ "logits/rejected": -3.2485687732696533,
469
+ "logps/chosen": -0.45796918869018555,
470
+ "logps/rejected": -0.7692248225212097,
471
+ "loss": 0.3752,
472
+ "nll_loss": 0.29675063490867615,
473
+ "rewards/accuracies": 0.8812500238418579,
474
+ "rewards/chosen": -0.09159384667873383,
475
+ "rewards/margins": 0.06225113198161125,
476
+ "rewards/rejected": -0.15384498238563538,
477
+ "step": 260
478
+ },
479
+ {
480
+ "epoch": 1.15,
481
+ "grad_norm": 4.125,
482
+ "learning_rate": 3.386512217606339e-06,
483
+ "log_odds_chosen": 0.672085702419281,
484
+ "log_odds_ratio": -0.4590581953525543,
485
+ "logits/chosen": -3.210575580596924,
486
+ "logits/rejected": -3.2388033866882324,
487
+ "logps/chosen": -0.4334833025932312,
488
+ "logps/rejected": -0.7109585404396057,
489
+ "loss": 0.3831,
490
+ "nll_loss": 0.29120856523513794,
491
+ "rewards/accuracies": 0.8374999761581421,
492
+ "rewards/chosen": -0.08669666945934296,
493
+ "rewards/margins": 0.055495042353868484,
494
+ "rewards/rejected": -0.14219172298908234,
495
+ "step": 270
496
+ },
497
+ {
498
+ "epoch": 1.2,
499
+ "grad_norm": 4.1875,
500
+ "learning_rate": 3.281049859051394e-06,
501
+ "log_odds_chosen": 0.6232529282569885,
502
+ "log_odds_ratio": -0.4691483974456787,
503
+ "logits/chosen": -3.1913466453552246,
504
+ "logits/rejected": -3.213522434234619,
505
+ "logps/chosen": -0.457376629114151,
506
+ "logps/rejected": -0.7198972702026367,
507
+ "loss": 0.3748,
508
+ "nll_loss": 0.2882610559463501,
509
+ "rewards/accuracies": 0.8374999761581421,
510
+ "rewards/chosen": -0.09147532284259796,
511
+ "rewards/margins": 0.05250414088368416,
512
+ "rewards/rejected": -0.14397946000099182,
513
+ "step": 280
514
+ },
515
+ {
516
+ "epoch": 1.24,
517
+ "grad_norm": 4.125,
518
+ "learning_rate": 3.17402351833036e-06,
519
+ "log_odds_chosen": 0.710340678691864,
520
+ "log_odds_ratio": -0.44652828574180603,
521
+ "logits/chosen": -3.149742841720581,
522
+ "logits/rejected": -3.189366579055786,
523
+ "logps/chosen": -0.43936777114868164,
524
+ "logps/rejected": -0.72941654920578,
525
+ "loss": 0.3629,
526
+ "nll_loss": 0.29283395409584045,
527
+ "rewards/accuracies": 0.8687499761581421,
528
+ "rewards/chosen": -0.08787355571985245,
529
+ "rewards/margins": 0.05800975114107132,
530
+ "rewards/rejected": -0.14588329195976257,
531
+ "step": 290
532
+ },
533
+ {
534
+ "epoch": 1.28,
535
+ "grad_norm": 4.09375,
536
+ "learning_rate": 3.0656475060743065e-06,
537
+ "log_odds_chosen": 0.7323763966560364,
538
+ "log_odds_ratio": -0.4345877766609192,
539
+ "logits/chosen": -3.1514878273010254,
540
+ "logits/rejected": -3.199571132659912,
541
+ "logps/chosen": -0.44486841559410095,
542
+ "logps/rejected": -0.7583059072494507,
543
+ "loss": 0.3627,
544
+ "nll_loss": 0.29908788204193115,
545
+ "rewards/accuracies": 0.887499988079071,
546
+ "rewards/chosen": -0.08897367864847183,
547
+ "rewards/margins": 0.06268750131130219,
548
+ "rewards/rejected": -0.15166118741035461,
549
+ "step": 300
550
+ },
551
+ {
552
+ "epoch": 1.32,
553
+ "grad_norm": 4.4375,
554
+ "learning_rate": 2.956138835510282e-06,
555
+ "log_odds_chosen": 0.7746503353118896,
556
+ "log_odds_ratio": -0.42378655076026917,
557
+ "logits/chosen": -3.1741645336151123,
558
+ "logits/rejected": -3.2151100635528564,
559
+ "logps/chosen": -0.4183032512664795,
560
+ "logps/rejected": -0.7279097437858582,
561
+ "loss": 0.3604,
562
+ "nll_loss": 0.26949578523635864,
563
+ "rewards/accuracies": 0.875,
564
+ "rewards/chosen": -0.08366064727306366,
565
+ "rewards/margins": 0.06192130595445633,
566
+ "rewards/rejected": -0.14558196067810059,
567
+ "step": 310
568
+ },
569
+ {
570
+ "epoch": 1.37,
571
+ "grad_norm": 4.28125,
572
+ "learning_rate": 2.8457167879118332e-06,
573
+ "log_odds_chosen": 0.7689862251281738,
574
+ "log_odds_ratio": -0.43063870072364807,
575
+ "logits/chosen": -3.159554958343506,
576
+ "logits/rejected": -3.188426971435547,
577
+ "logps/chosen": -0.3951800465583801,
578
+ "logps/rejected": -0.7116678953170776,
579
+ "loss": 0.3763,
580
+ "nll_loss": 0.27707797288894653,
581
+ "rewards/accuracies": 0.8687499761581421,
582
+ "rewards/chosen": -0.07903601229190826,
583
+ "rewards/margins": 0.0632975846529007,
584
+ "rewards/rejected": -0.14233359694480896,
585
+ "step": 320
586
+ },
587
+ {
588
+ "epoch": 1.41,
589
+ "grad_norm": 3.921875,
590
+ "learning_rate": 2.7346024735079483e-06,
591
+ "log_odds_chosen": 0.7219299674034119,
592
+ "log_odds_ratio": -0.44966334104537964,
593
+ "logits/chosen": -3.127485752105713,
594
+ "logits/rejected": -3.198312997817993,
595
+ "logps/chosen": -0.41490626335144043,
596
+ "logps/rejected": -0.7052132487297058,
597
+ "loss": 0.3787,
598
+ "nll_loss": 0.2818581163883209,
599
+ "rewards/accuracies": 0.831250011920929,
600
+ "rewards/chosen": -0.08298125118017197,
601
+ "rewards/margins": 0.058061398565769196,
602
+ "rewards/rejected": -0.14104264974594116,
603
+ "step": 330
604
+ },
605
+ {
606
+ "epoch": 1.45,
607
+ "grad_norm": 4.34375,
608
+ "learning_rate": 2.6230183887296955e-06,
609
+ "log_odds_chosen": 0.7794576287269592,
610
+ "log_odds_ratio": -0.42089906334877014,
611
+ "logits/chosen": -3.129941463470459,
612
+ "logits/rejected": -3.1785807609558105,
613
+ "logps/chosen": -0.4315931797027588,
614
+ "logps/rejected": -0.7535022497177124,
615
+ "loss": 0.3603,
616
+ "nll_loss": 0.30001071095466614,
617
+ "rewards/accuracies": 0.8687499761581421,
618
+ "rewards/chosen": -0.08631863445043564,
619
+ "rewards/margins": 0.06438182294368744,
620
+ "rewards/rejected": -0.15070047974586487,
621
+ "step": 340
622
+ },
623
+ {
624
+ "epoch": 1.5,
625
+ "grad_norm": 4.53125,
626
+ "learning_rate": 2.511187970681109e-06,
627
+ "log_odds_chosen": 0.6614913940429688,
628
+ "log_odds_ratio": -0.4707488417625427,
629
+ "logits/chosen": -3.1603000164031982,
630
+ "logits/rejected": -3.1471877098083496,
631
+ "logps/chosen": -0.4360755980014801,
632
+ "logps/rejected": -0.6958964467048645,
633
+ "loss": 0.3618,
634
+ "nll_loss": 0.2804470360279083,
635
+ "rewards/accuracies": 0.824999988079071,
636
+ "rewards/chosen": -0.0872151255607605,
637
+ "rewards/margins": 0.0519641637802124,
638
+ "rewards/rejected": -0.1391792744398117,
639
+ "step": 350
640
+ },
641
+ {
642
+ "epoch": 1.54,
643
+ "grad_norm": 3.828125,
644
+ "learning_rate": 2.399335149726463e-06,
645
+ "log_odds_chosen": 0.6727465391159058,
646
+ "log_odds_ratio": -0.4600442945957184,
647
+ "logits/chosen": -3.1209638118743896,
648
+ "logits/rejected": -3.1621603965759277,
649
+ "logps/chosen": -0.4566575884819031,
650
+ "logps/rejected": -0.7309259176254272,
651
+ "loss": 0.3719,
652
+ "nll_loss": 0.3025999665260315,
653
+ "rewards/accuracies": 0.824999988079071,
654
+ "rewards/chosen": -0.09133151918649673,
655
+ "rewards/margins": 0.0548536591231823,
656
+ "rewards/rejected": -0.14618518948554993,
657
+ "step": 360
658
+ },
659
+ {
660
+ "epoch": 1.58,
661
+ "grad_norm": 4.46875,
662
+ "learning_rate": 2.2876839010898377e-06,
663
+ "log_odds_chosen": 0.7592717409133911,
664
+ "log_odds_ratio": -0.43831682205200195,
665
+ "logits/chosen": -3.1502082347869873,
666
+ "logits/rejected": -3.207881212234497,
667
+ "logps/chosen": -0.42622488737106323,
668
+ "logps/rejected": -0.7278318405151367,
669
+ "loss": 0.3498,
670
+ "nll_loss": 0.2541278004646301,
671
+ "rewards/accuracies": 0.8812500238418579,
672
+ "rewards/chosen": -0.08524497598409653,
673
+ "rewards/margins": 0.0603213906288147,
674
+ "rewards/rejected": -0.14556637406349182,
675
+ "step": 370
676
+ },
677
+ {
678
+ "epoch": 1.62,
679
+ "grad_norm": 4.90625,
680
+ "learning_rate": 2.1764577963648616e-06,
681
+ "log_odds_chosen": 0.6783360838890076,
682
+ "log_odds_ratio": -0.46248960494995117,
683
+ "logits/chosen": -3.146242141723633,
684
+ "logits/rejected": -3.164093017578125,
685
+ "logps/chosen": -0.42164331674575806,
686
+ "logps/rejected": -0.6987554430961609,
687
+ "loss": 0.3636,
688
+ "nll_loss": 0.30008167028427124,
689
+ "rewards/accuracies": 0.84375,
690
+ "rewards/chosen": -0.08432865887880325,
691
+ "rewards/margins": 0.05542243644595146,
692
+ "rewards/rejected": -0.1397511065006256,
693
+ "step": 380
694
+ },
695
+ {
696
+ "epoch": 1.67,
697
+ "grad_norm": 4.4375,
698
+ "learning_rate": 2.0658795558326745e-06,
699
+ "log_odds_chosen": 0.6860665082931519,
700
+ "log_odds_ratio": -0.44539427757263184,
701
+ "logits/chosen": -3.1428816318511963,
702
+ "logits/rejected": -3.202255964279175,
703
+ "logps/chosen": -0.4127088487148285,
704
+ "logps/rejected": -0.6815465688705444,
705
+ "loss": 0.3548,
706
+ "nll_loss": 0.25606799125671387,
707
+ "rewards/accuracies": 0.8812500238418579,
708
+ "rewards/chosen": -0.08254177868366241,
709
+ "rewards/margins": 0.05376753211021423,
710
+ "rewards/rejected": -0.13630932569503784,
711
+ "step": 390
712
+ },
713
+ {
714
+ "epoch": 1.71,
715
+ "grad_norm": 4.15625,
716
+ "learning_rate": 1.956170602484582e-06,
717
+ "log_odds_chosen": 0.8614572286605835,
718
+ "log_odds_ratio": -0.3955555558204651,
719
+ "logits/chosen": -3.1366238594055176,
720
+ "logits/rejected": -3.1604573726654053,
721
+ "logps/chosen": -0.3913766145706177,
722
+ "logps/rejected": -0.7423630952835083,
723
+ "loss": 0.3487,
724
+ "nll_loss": 0.24887590110301971,
725
+ "rewards/accuracies": 0.887499988079071,
726
+ "rewards/chosen": -0.07827533036470413,
727
+ "rewards/margins": 0.07019731402397156,
728
+ "rewards/rejected": -0.1484726369380951,
729
+ "step": 400
730
+ },
731
+ {
732
+ "epoch": 1.75,
733
+ "grad_norm": 4.625,
734
+ "learning_rate": 1.8475506186424075e-06,
735
+ "log_odds_chosen": 0.7284751534461975,
736
+ "log_odds_ratio": -0.4521869122982025,
737
+ "logits/chosen": -3.1315054893493652,
738
+ "logits/rejected": -3.1535370349884033,
739
+ "logps/chosen": -0.4371741712093353,
740
+ "logps/rejected": -0.7353378534317017,
741
+ "loss": 0.3711,
742
+ "nll_loss": 0.2623272240161896,
743
+ "rewards/accuracies": 0.8500000238418579,
744
+ "rewards/chosen": -0.08743483573198318,
745
+ "rewards/margins": 0.059632737189531326,
746
+ "rewards/rejected": -0.1470675766468048,
747
+ "step": 410
748
+ },
749
+ {
750
+ "epoch": 1.79,
751
+ "grad_norm": 3.796875,
752
+ "learning_rate": 1.740237106064383e-06,
753
+ "log_odds_chosen": 0.7518190145492554,
754
+ "log_odds_ratio": -0.43122825026512146,
755
+ "logits/chosen": -3.155817747116089,
756
+ "logits/rejected": -3.1722676753997803,
757
+ "logps/chosen": -0.44021105766296387,
758
+ "logps/rejected": -0.752731442451477,
759
+ "loss": 0.3521,
760
+ "nll_loss": 0.27316415309906006,
761
+ "rewards/accuracies": 0.8687499761581421,
762
+ "rewards/chosen": -0.08804221451282501,
763
+ "rewards/margins": 0.06250409036874771,
764
+ "rewards/rejected": -0.15054629743099213,
765
+ "step": 420
766
+ },
767
+ {
768
+ "epoch": 1.84,
769
+ "grad_norm": 4.0625,
770
+ "learning_rate": 1.6344449504174193e-06,
771
+ "log_odds_chosen": 0.7577536106109619,
772
+ "log_odds_ratio": -0.4262828230857849,
773
+ "logits/chosen": -3.112555980682373,
774
+ "logits/rejected": -3.175693988800049,
775
+ "logps/chosen": -0.4249524176120758,
776
+ "logps/rejected": -0.7365177273750305,
777
+ "loss": 0.3772,
778
+ "nll_loss": 0.29327088594436646,
779
+ "rewards/accuracies": 0.875,
780
+ "rewards/chosen": -0.084990493953228,
781
+ "rewards/margins": 0.06231304258108139,
782
+ "rewards/rejected": -0.14730355143547058,
783
+ "step": 430
784
+ },
785
+ {
786
+ "epoch": 1.88,
787
+ "grad_norm": 4.625,
788
+ "learning_rate": 1.5303859909878632e-06,
789
+ "log_odds_chosen": 0.6891047954559326,
790
+ "log_odds_ratio": -0.45858821272850037,
791
+ "logits/chosen": -3.18835711479187,
792
+ "logits/rejected": -3.198822021484375,
793
+ "logps/chosen": -0.43595075607299805,
794
+ "logps/rejected": -0.707006573677063,
795
+ "loss": 0.3569,
796
+ "nll_loss": 0.2663270831108093,
797
+ "rewards/accuracies": 0.84375,
798
+ "rewards/chosen": -0.08719015121459961,
799
+ "rewards/margins": 0.054211169481277466,
800
+ "rewards/rejected": -0.14140132069587708,
801
+ "step": 440
802
+ },
803
+ {
804
+ "epoch": 1.92,
805
+ "grad_norm": 4.84375,
806
+ "learning_rate": 1.4282685964923643e-06,
807
+ "log_odds_chosen": 0.8139818906784058,
808
+ "log_odds_ratio": -0.41608935594558716,
809
+ "logits/chosen": -3.1146390438079834,
810
+ "logits/rejected": -3.167144775390625,
811
+ "logps/chosen": -0.41870421171188354,
812
+ "logps/rejected": -0.7421842813491821,
813
+ "loss": 0.3699,
814
+ "nll_loss": 0.2976505160331726,
815
+ "rewards/accuracies": 0.8374999761581421,
816
+ "rewards/chosen": -0.08374085277318954,
817
+ "rewards/margins": 0.06469599902629852,
818
+ "rewards/rejected": -0.14843687415122986,
819
+ "step": 450
820
+ },
821
+ {
822
+ "epoch": 1.97,
823
+ "grad_norm": 4.53125,
824
+ "learning_rate": 1.328297247838241e-06,
825
+ "log_odds_chosen": 0.7360786199569702,
826
+ "log_odds_ratio": -0.4329991936683655,
827
+ "logits/chosen": -3.162118434906006,
828
+ "logits/rejected": -3.2087559700012207,
829
+ "logps/chosen": -0.4136020541191101,
830
+ "logps/rejected": -0.7124483585357666,
831
+ "loss": 0.3642,
832
+ "nll_loss": 0.2634555995464325,
833
+ "rewards/accuracies": 0.887499988079071,
834
+ "rewards/chosen": -0.08272042125463486,
835
+ "rewards/margins": 0.05976926162838936,
836
+ "rewards/rejected": -0.14248968660831451,
837
+ "step": 460
838
+ },
839
+ {
840
+ "epoch": 2.01,
841
+ "grad_norm": 5.3125,
842
+ "learning_rate": 1.2306721286688312e-06,
843
+ "log_odds_chosen": 0.8309246301651001,
844
+ "log_odds_ratio": -0.4013638496398926,
845
+ "logits/chosen": -3.150937795639038,
846
+ "logits/rejected": -3.205261707305908,
847
+ "logps/chosen": -0.4131666123867035,
848
+ "logps/rejected": -0.7659183740615845,
849
+ "loss": 0.3651,
850
+ "nll_loss": 0.2736993134021759,
851
+ "rewards/accuracies": 0.9375,
852
+ "rewards/chosen": -0.08263333141803741,
853
+ "rewards/margins": 0.07055036723613739,
854
+ "rewards/rejected": -0.1531836986541748,
855
+ "step": 470
856
+ },
857
+ {
858
+ "epoch": 2.05,
859
+ "grad_norm": 4.5625,
860
+ "learning_rate": 1.1355887245137383e-06,
861
+ "log_odds_chosen": 1.1850926876068115,
862
+ "log_odds_ratio": -0.30928006768226624,
863
+ "logits/chosen": -3.197258234024048,
864
+ "logits/rejected": -3.233253002166748,
865
+ "logps/chosen": -0.3570306599140167,
866
+ "logps/rejected": -0.8269041776657104,
867
+ "loss": 0.306,
868
+ "nll_loss": 0.21892933547496796,
869
+ "rewards/accuracies": 0.956250011920929,
870
+ "rewards/chosen": -0.07140612602233887,
871
+ "rewards/margins": 0.09397471696138382,
872
+ "rewards/rejected": -0.16538085043430328,
873
+ "step": 480
874
+ },
875
+ {
876
+ "epoch": 2.09,
877
+ "grad_norm": 4.3125,
878
+ "learning_rate": 1.043237431346622e-06,
879
+ "log_odds_chosen": 1.2696115970611572,
880
+ "log_odds_ratio": -0.29188066720962524,
881
+ "logits/chosen": -3.060274600982666,
882
+ "logits/rejected": -3.112060785293579,
883
+ "logps/chosen": -0.37511518597602844,
884
+ "logps/rejected": -0.8814083933830261,
885
+ "loss": 0.3133,
886
+ "nll_loss": 0.2801322340965271,
887
+ "rewards/accuracies": 0.96875,
888
+ "rewards/chosen": -0.07502304017543793,
889
+ "rewards/margins": 0.10125863552093506,
890
+ "rewards/rejected": -0.17628169059753418,
891
+ "step": 490
892
+ },
893
+ {
894
+ "epoch": 2.14,
895
+ "grad_norm": 4.40625,
896
+ "learning_rate": 9.538031743343628e-07,
897
+ "log_odds_chosen": 1.2528076171875,
898
+ "log_odds_ratio": -0.3035999834537506,
899
+ "logits/chosen": -3.0861003398895264,
900
+ "logits/rejected": -3.111483097076416,
901
+ "logps/chosen": -0.39375776052474976,
902
+ "logps/rejected": -0.9319614171981812,
903
+ "loss": 0.3176,
904
+ "nll_loss": 0.2627008259296417,
905
+ "rewards/accuracies": 0.956250011920929,
906
+ "rewards/chosen": -0.07875155657529831,
907
+ "rewards/margins": 0.10764074325561523,
908
+ "rewards/rejected": -0.18639230728149414,
909
+ "step": 500
910
+ },
911
+ {
912
+ "epoch": 2.18,
913
+ "grad_norm": 4.75,
914
+ "learning_rate": 8.674650375410379e-07,
915
+ "log_odds_chosen": 1.0996912717819214,
916
+ "log_odds_ratio": -0.34020066261291504,
917
+ "logits/chosen": -3.0855681896209717,
918
+ "logits/rejected": -3.1118760108947754,
919
+ "logps/chosen": -0.426668643951416,
920
+ "logps/rejected": -0.8785163760185242,
921
+ "loss": 0.3107,
922
+ "nll_loss": 0.2832065224647522,
923
+ "rewards/accuracies": 0.925000011920929,
924
+ "rewards/chosen": -0.08533372730016708,
925
+ "rewards/margins": 0.09036955237388611,
926
+ "rewards/rejected": -0.17570330202579498,
927
+ "step": 510
928
+ },
929
+ {
930
+ "epoch": 2.22,
931
+ "grad_norm": 4.3125,
932
+ "learning_rate": 7.843959053281663e-07,
933
+ "log_odds_chosen": 1.3144631385803223,
934
+ "log_odds_ratio": -0.2888423502445221,
935
+ "logits/chosen": -3.060899019241333,
936
+ "logits/rejected": -3.110773801803589,
937
+ "logps/chosen": -0.3530345559120178,
938
+ "logps/rejected": -0.8913728594779968,
939
+ "loss": 0.3117,
940
+ "nll_loss": 0.2591019570827484,
941
+ "rewards/accuracies": 0.956250011920929,
942
+ "rewards/chosen": -0.07060691714286804,
943
+ "rewards/margins": 0.10766766965389252,
944
+ "rewards/rejected": -0.17827460169792175,
945
+ "step": 520
946
+ },
947
+ {
948
+ "epoch": 2.26,
949
+ "grad_norm": 5.0,
950
+ "learning_rate": 7.047621161693152e-07,
951
+ "log_odds_chosen": 1.327051043510437,
952
+ "log_odds_ratio": -0.2728634774684906,
953
+ "logits/chosen": -3.0373597145080566,
954
+ "logits/rejected": -3.121220350265503,
955
+ "logps/chosen": -0.35444390773773193,
956
+ "logps/rejected": -0.9054730534553528,
957
+ "loss": 0.3201,
958
+ "nll_loss": 0.2551073729991913,
959
+ "rewards/accuracies": 0.981249988079071,
960
+ "rewards/chosen": -0.07088878005743027,
961
+ "rewards/margins": 0.11020584404468536,
962
+ "rewards/rejected": -0.18109463155269623,
963
+ "step": 530
964
+ },
965
+ {
966
+ "epoch": 2.31,
967
+ "grad_norm": 4.8125,
968
+ "learning_rate": 6.28723129572247e-07,
969
+ "log_odds_chosen": 1.3342939615249634,
970
+ "log_odds_ratio": -0.27417218685150146,
971
+ "logits/chosen": -3.0797994136810303,
972
+ "logits/rejected": -3.1183524131774902,
973
+ "logps/chosen": -0.362510621547699,
974
+ "logps/rejected": -0.9240224957466125,
975
+ "loss": 0.314,
976
+ "nll_loss": 0.2499639093875885,
977
+ "rewards/accuracies": 0.9624999761581421,
978
+ "rewards/chosen": -0.07250212132930756,
979
+ "rewards/margins": 0.11230238527059555,
980
+ "rewards/rejected": -0.1848045140504837,
981
+ "step": 540
982
+ },
983
+ {
984
+ "epoch": 2.35,
985
+ "grad_norm": 4.5,
986
+ "learning_rate": 5.564312067755856e-07,
987
+ "log_odds_chosen": 1.2629302740097046,
988
+ "log_odds_ratio": -0.29583701491355896,
989
+ "logits/chosen": -3.106447696685791,
990
+ "logits/rejected": -3.153026580810547,
991
+ "logps/chosen": -0.36336833238601685,
992
+ "logps/rejected": -0.8715957403182983,
993
+ "loss": 0.3098,
994
+ "nll_loss": 0.2292967140674591,
995
+ "rewards/accuracies": 0.9750000238418579,
996
+ "rewards/chosen": -0.07267366349697113,
997
+ "rewards/margins": 0.10164550691843033,
998
+ "rewards/rejected": -0.17431916296482086,
999
+ "step": 550
1000
+ },
1001
+ {
1002
+ "epoch": 2.39,
1003
+ "grad_norm": 4.375,
1004
+ "learning_rate": 4.880311058593617e-07,
1005
+ "log_odds_chosen": 1.3260443210601807,
1006
+ "log_odds_ratio": -0.2782818675041199,
1007
+ "logits/chosen": -3.0871644020080566,
1008
+ "logits/rejected": -3.1364452838897705,
1009
+ "logps/chosen": -0.36632782220840454,
1010
+ "logps/rejected": -0.924032986164093,
1011
+ "loss": 0.2976,
1012
+ "nll_loss": 0.22606725990772247,
1013
+ "rewards/accuracies": 0.9750000238418579,
1014
+ "rewards/chosen": -0.07326555997133255,
1015
+ "rewards/margins": 0.11154104769229889,
1016
+ "rewards/rejected": -0.18480661511421204,
1017
+ "step": 560
1018
+ },
1019
+ {
1020
+ "epoch": 2.44,
1021
+ "grad_norm": 4.96875,
1022
+ "learning_rate": 4.2365979187997094e-07,
1023
+ "log_odds_chosen": 1.277055025100708,
1024
+ "log_odds_ratio": -0.29783302545547485,
1025
+ "logits/chosen": -3.0671963691711426,
1026
+ "logits/rejected": -3.1140341758728027,
1027
+ "logps/chosen": -0.381780207157135,
1028
+ "logps/rejected": -0.9046441912651062,
1029
+ "loss": 0.3148,
1030
+ "nll_loss": 0.2656724154949188,
1031
+ "rewards/accuracies": 0.9375,
1032
+ "rewards/chosen": -0.07635603845119476,
1033
+ "rewards/margins": 0.10457281768321991,
1034
+ "rewards/rejected": -0.18092885613441467,
1035
+ "step": 570
1036
+ },
1037
+ {
1038
+ "epoch": 2.48,
1039
+ "grad_norm": 4.9375,
1040
+ "learning_rate": 3.634461626099495e-07,
1041
+ "log_odds_chosen": 1.2928454875946045,
1042
+ "log_odds_ratio": -0.28668928146362305,
1043
+ "logits/chosen": -3.0869479179382324,
1044
+ "logits/rejected": -3.1252989768981934,
1045
+ "logps/chosen": -0.345980703830719,
1046
+ "logps/rejected": -0.8593694567680359,
1047
+ "loss": 0.3035,
1048
+ "nll_loss": 0.22678887844085693,
1049
+ "rewards/accuracies": 0.96875,
1050
+ "rewards/chosen": -0.06919614970684052,
1051
+ "rewards/margins": 0.10267776250839233,
1052
+ "rewards/rejected": -0.17187389731407166,
1053
+ "step": 580
1054
+ },
1055
+ {
1056
+ "epoch": 2.52,
1057
+ "grad_norm": 5.5,
1058
+ "learning_rate": 3.075107904317667e-07,
1059
+ "log_odds_chosen": 1.2825336456298828,
1060
+ "log_odds_ratio": -0.29012447595596313,
1061
+ "logits/chosen": -3.0542449951171875,
1062
+ "logits/rejected": -3.106863021850586,
1063
+ "logps/chosen": -0.36031287908554077,
1064
+ "logps/rejected": -0.8658483624458313,
1065
+ "loss": 0.3156,
1066
+ "nll_loss": 0.2495698481798172,
1067
+ "rewards/accuracies": 0.96875,
1068
+ "rewards/chosen": -0.07206258177757263,
1069
+ "rewards/margins": 0.10110710561275482,
1070
+ "rewards/rejected": -0.17316967248916626,
1071
+ "step": 590
1072
+ },
1073
+ {
1074
+ "epoch": 2.56,
1075
+ "grad_norm": 4.90625,
1076
+ "learning_rate": 2.5596568090246546e-07,
1077
+ "log_odds_chosen": 1.2836467027664185,
1078
+ "log_odds_ratio": -0.28635507822036743,
1079
+ "logits/chosen": -3.0764803886413574,
1080
+ "logits/rejected": -3.1473774909973145,
1081
+ "logps/chosen": -0.37658047676086426,
1082
+ "logps/rejected": -0.9045608639717102,
1083
+ "loss": 0.3141,
1084
+ "nll_loss": 0.24526798725128174,
1085
+ "rewards/accuracies": 0.9750000238418579,
1086
+ "rewards/chosen": -0.07531610131263733,
1087
+ "rewards/margins": 0.10559606552124023,
1088
+ "rewards/rejected": -0.18091216683387756,
1089
+ "step": 600
1090
+ },
1091
+ {
1092
+ "epoch": 2.61,
1093
+ "grad_norm": 4.28125,
1094
+ "learning_rate": 2.0891404847259267e-07,
1095
+ "log_odds_chosen": 1.2697139978408813,
1096
+ "log_odds_ratio": -0.29423031210899353,
1097
+ "logits/chosen": -3.090924024581909,
1098
+ "logits/rejected": -3.146167039871216,
1099
+ "logps/chosen": -0.38669395446777344,
1100
+ "logps/rejected": -0.9116611480712891,
1101
+ "loss": 0.313,
1102
+ "nll_loss": 0.25895369052886963,
1103
+ "rewards/accuracies": 0.96875,
1104
+ "rewards/chosen": -0.0773387923836708,
1105
+ "rewards/margins": 0.10499344766139984,
1106
+ "rewards/rejected": -0.18233224749565125,
1107
+ "step": 610
1108
+ },
1109
+ {
1110
+ "epoch": 2.65,
1111
+ "grad_norm": 4.96875,
1112
+ "learning_rate": 1.6645010980854082e-07,
1113
+ "log_odds_chosen": 1.3362077474594116,
1114
+ "log_odds_ratio": -0.2740064263343811,
1115
+ "logits/chosen": -3.075295925140381,
1116
+ "logits/rejected": -3.110752582550049,
1117
+ "logps/chosen": -0.3653913140296936,
1118
+ "logps/rejected": -0.9164104461669922,
1119
+ "loss": 0.3052,
1120
+ "nll_loss": 0.26205652952194214,
1121
+ "rewards/accuracies": 0.9750000238418579,
1122
+ "rewards/chosen": -0.07307826727628708,
1123
+ "rewards/margins": 0.1102038249373436,
1124
+ "rewards/rejected": -0.18328210711479187,
1125
+ "step": 620
1126
+ },
1127
+ {
1128
+ "epoch": 2.69,
1129
+ "grad_norm": 5.21875,
1130
+ "learning_rate": 1.286588951321363e-07,
1131
+ "log_odds_chosen": 1.2346899509429932,
1132
+ "log_odds_ratio": -0.29600512981414795,
1133
+ "logits/chosen": -3.094604015350342,
1134
+ "logits/rejected": -3.125540256500244,
1135
+ "logps/chosen": -0.393343448638916,
1136
+ "logps/rejected": -0.9184632301330566,
1137
+ "loss": 0.3236,
1138
+ "nll_loss": 0.2790059447288513,
1139
+ "rewards/accuracies": 0.96875,
1140
+ "rewards/chosen": -0.07866868376731873,
1141
+ "rewards/margins": 0.10502395778894424,
1142
+ "rewards/rejected": -0.18369261920452118,
1143
+ "step": 630
1144
+ },
1145
+ {
1146
+ "epoch": 2.74,
1147
+ "grad_norm": 4.6875,
1148
+ "learning_rate": 9.561607795526007e-08,
1149
+ "log_odds_chosen": 1.2945499420166016,
1150
+ "log_odds_ratio": -0.2860717177391052,
1151
+ "logits/chosen": -3.092331647872925,
1152
+ "logits/rejected": -3.149021863937378,
1153
+ "logps/chosen": -0.3724428415298462,
1154
+ "logps/rejected": -0.9131635427474976,
1155
+ "loss": 0.312,
1156
+ "nll_loss": 0.2520996630191803,
1157
+ "rewards/accuracies": 0.9750000238418579,
1158
+ "rewards/chosen": -0.074488565325737,
1159
+ "rewards/margins": 0.1081441268324852,
1160
+ "rewards/rejected": -0.182632714509964,
1161
+ "step": 640
1162
+ },
1163
+ {
1164
+ "epoch": 2.78,
1165
+ "grad_norm": 4.375,
1166
+ "learning_rate": 6.738782355044048e-08,
1167
+ "log_odds_chosen": 1.303377628326416,
1168
+ "log_odds_ratio": -0.28758224844932556,
1169
+ "logits/chosen": -3.0791025161743164,
1170
+ "logits/rejected": -3.130547046661377,
1171
+ "logps/chosen": -0.3604482114315033,
1172
+ "logps/rejected": -0.8876269459724426,
1173
+ "loss": 0.3161,
1174
+ "nll_loss": 0.2479972094297409,
1175
+ "rewards/accuracies": 0.9750000238418579,
1176
+ "rewards/chosen": -0.07208964973688126,
1177
+ "rewards/margins": 0.10543572902679443,
1178
+ "rewards/rejected": -0.1775253713130951,
1179
+ "step": 650
1180
+ },
1181
+ {
1182
+ "epoch": 2.82,
1183
+ "grad_norm": 4.8125,
1184
+ "learning_rate": 4.40306564608381e-08,
1185
+ "log_odds_chosen": 1.220916509628296,
1186
+ "log_odds_ratio": -0.3061557412147522,
1187
+ "logits/chosen": -3.0849318504333496,
1188
+ "logits/rejected": -3.0918779373168945,
1189
+ "logps/chosen": -0.38813266158103943,
1190
+ "logps/rejected": -0.8825920224189758,
1191
+ "loss": 0.3064,
1192
+ "nll_loss": 0.26622262597084045,
1193
+ "rewards/accuracies": 0.96875,
1194
+ "rewards/chosen": -0.0776265487074852,
1195
+ "rewards/margins": 0.09889186918735504,
1196
+ "rewards/rejected": -0.17651841044425964,
1197
+ "step": 660
1198
+ },
1199
+ {
1200
+ "epoch": 2.86,
1201
+ "grad_norm": 4.9375,
1202
+ "learning_rate": 2.5591347314928572e-08,
1203
+ "log_odds_chosen": 1.1736371517181396,
1204
+ "log_odds_ratio": -0.3195890486240387,
1205
+ "logits/chosen": -3.0899367332458496,
1206
+ "logits/rejected": -3.1268470287323,
1207
+ "logps/chosen": -0.3975283205509186,
1208
+ "logps/rejected": -0.897696316242218,
1209
+ "loss": 0.3181,
1210
+ "nll_loss": 0.2676974833011627,
1211
+ "rewards/accuracies": 0.9375,
1212
+ "rewards/chosen": -0.07950566709041595,
1213
+ "rewards/margins": 0.10003360360860825,
1214
+ "rewards/rejected": -0.1795392781496048,
1215
+ "step": 670
1216
+ },
1217
+ {
1218
+ "epoch": 2.91,
1219
+ "grad_norm": 3.921875,
1220
+ "learning_rate": 1.2106819172520434e-08,
1221
+ "log_odds_chosen": 1.2462742328643799,
1222
+ "log_odds_ratio": -0.29843610525131226,
1223
+ "logits/chosen": -3.0916683673858643,
1224
+ "logits/rejected": -3.126143217086792,
1225
+ "logps/chosen": -0.37134823203086853,
1226
+ "logps/rejected": -0.8719061613082886,
1227
+ "loss": 0.3068,
1228
+ "nll_loss": 0.2421465665102005,
1229
+ "rewards/accuracies": 0.9750000238418579,
1230
+ "rewards/chosen": -0.07426965236663818,
1231
+ "rewards/margins": 0.10011158138513565,
1232
+ "rewards/rejected": -0.17438122630119324,
1233
+ "step": 680
1234
+ },
1235
+ {
1236
+ "epoch": 2.95,
1237
+ "grad_norm": 4.0625,
1238
+ "learning_rate": 3.6040735896455957e-09,
1239
+ "log_odds_chosen": 1.2757344245910645,
1240
+ "log_odds_ratio": -0.29123741388320923,
1241
+ "logits/chosen": -3.0802972316741943,
1242
+ "logits/rejected": -3.130801200866699,
1243
+ "logps/chosen": -0.37011533975601196,
1244
+ "logps/rejected": -0.8979229927062988,
1245
+ "loss": 0.3024,
1246
+ "nll_loss": 0.23298993706703186,
1247
+ "rewards/accuracies": 0.9437500238418579,
1248
+ "rewards/chosen": -0.07402306795120239,
1249
+ "rewards/margins": 0.10556153953075409,
1250
+ "rewards/rejected": -0.17958460748195648,
1251
+ "step": 690
1252
+ },
1253
+ {
1254
+ "epoch": 2.99,
1255
+ "grad_norm": 5.0,
1256
+ "learning_rate": 1.0013655036916758e-10,
1257
+ "log_odds_chosen": 1.3622093200683594,
1258
+ "log_odds_ratio": -0.2736757695674896,
1259
+ "logits/chosen": -3.140601396560669,
1260
+ "logits/rejected": -3.1706244945526123,
1261
+ "logps/chosen": -0.34326601028442383,
1262
+ "logps/rejected": -0.8912754058837891,
1263
+ "loss": 0.3068,
1264
+ "nll_loss": 0.21759584546089172,
1265
+ "rewards/accuracies": 0.9750000238418579,
1266
+ "rewards/chosen": -0.06865320354700089,
1267
+ "rewards/margins": 0.10960187017917633,
1268
+ "rewards/rejected": -0.1782551109790802,
1269
+ "step": 700
1270
+ },
1271
+ {
1272
+ "epoch": 3.0,
1273
+ "step": 702,
1274
+ "total_flos": 0.0,
1275
+ "train_loss": 0.3926089354220279,
1276
+ "train_runtime": 9824.6416,
1277
+ "train_samples_per_second": 4.569,
1278
+ "train_steps_per_second": 0.071
1279
+ }
1280
+ ],
1281
+ "logging_steps": 10,
1282
+ "max_steps": 702,
1283
+ "num_input_tokens_seen": 0,
1284
+ "num_train_epochs": 3,
1285
+ "save_steps": 500,
1286
+ "total_flos": 0.0,
1287
+ "train_batch_size": 8,
1288
+ "trial_name": null,
1289
+ "trial_params": null
1290
+ }