{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.971563981042654,
  "eval_steps": 100,
  "global_step": 104,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "grad_norm": 139.638709617328,
      "learning_rate": 4.545454545454545e-08,
      "logits/chosen": 111.16130065917969,
      "logits/rejected": 86.8372802734375,
      "logps/chosen": -326.8536071777344,
      "logps/rejected": -329.15960693359375,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.19,
      "grad_norm": 141.5345140695996,
      "learning_rate": 4.545454545454545e-07,
      "logits/chosen": 110.37065124511719,
      "logits/rejected": 133.2639923095703,
      "logps/chosen": -350.3541259765625,
      "logps/rejected": -434.3558349609375,
      "loss": 0.7191,
      "rewards/accuracies": 0.4722222089767456,
      "rewards/chosen": 0.13274627923965454,
      "rewards/margins": 0.07573667168617249,
      "rewards/rejected": 0.05700961872935295,
      "step": 10
    },
    {
      "epoch": 0.38,
      "grad_norm": 123.71909837085582,
      "learning_rate": 4.885348141000122e-07,
      "logits/chosen": 117.74342346191406,
      "logits/rejected": 128.52548217773438,
      "logps/chosen": -333.21240234375,
      "logps/rejected": -410.2923889160156,
      "loss": 0.6097,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.11470325291156769,
      "rewards/margins": 0.7479402422904968,
      "rewards/rejected": -0.6332370042800903,
      "step": 20
    },
    {
      "epoch": 0.57,
      "grad_norm": 111.89651526533274,
      "learning_rate": 4.5025027361734613e-07,
      "logits/chosen": 114.44095611572266,
      "logits/rejected": 119.11683654785156,
      "logps/chosen": -399.1412048339844,
      "logps/rejected": -474.2645568847656,
      "loss": 0.596,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -1.7276217937469482,
      "rewards/margins": 1.0803521871566772,
      "rewards/rejected": -2.807974100112915,
      "step": 30
    },
    {
      "epoch": 0.76,
      "grad_norm": 102.67088507130228,
      "learning_rate": 3.893311157806091e-07,
      "logits/chosen": 116.33101654052734,
      "logits/rejected": 111.0595703125,
      "logps/chosen": -428.7275390625,
      "logps/rejected": -464.0934143066406,
      "loss": 0.5343,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -2.2770252227783203,
      "rewards/margins": 0.9522085189819336,
      "rewards/rejected": -3.229233503341675,
      "step": 40
    },
    {
      "epoch": 0.95,
      "grad_norm": 130.9996197198566,
      "learning_rate": 3.126631330646801e-07,
      "logits/chosen": 123.2393569946289,
      "logits/rejected": 124.50789642333984,
      "logps/chosen": -438.548095703125,
      "logps/rejected": -474.1234436035156,
      "loss": 0.5138,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -2.3258581161499023,
      "rewards/margins": 1.3220884799957275,
      "rewards/rejected": -3.647946834564209,
      "step": 50
    },
    {
      "epoch": 1.14,
      "grad_norm": 56.950942870641875,
      "learning_rate": 2.2891223348923882e-07,
      "logits/chosen": 122.619384765625,
      "logits/rejected": 126.1447525024414,
      "logps/chosen": -414.3634338378906,
      "logps/rejected": -468.19586181640625,
      "loss": 0.2724,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": -2.3773388862609863,
      "rewards/margins": 2.358515501022339,
      "rewards/rejected": -4.735854148864746,
      "step": 60
    },
    {
      "epoch": 1.33,
      "grad_norm": 52.820355390804025,
      "learning_rate": 1.4754491880085317e-07,
      "logits/chosen": 117.16709899902344,
      "logits/rejected": 118.9737319946289,
      "logps/chosen": -387.70526123046875,
      "logps/rejected": -511.97503662109375,
      "loss": 0.1936,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": -2.4186935424804688,
      "rewards/margins": 2.5914835929870605,
      "rewards/rejected": -5.010177135467529,
      "step": 70
    },
    {
      "epoch": 1.52,
      "grad_norm": 51.657826972971314,
      "learning_rate": 7.775827023107834e-08,
      "logits/chosen": 124.15473937988281,
      "logits/rejected": 125.7086181640625,
      "logps/chosen": -446.75421142578125,
      "logps/rejected": -543.6109619140625,
      "loss": 0.1779,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": -2.316882848739624,
      "rewards/margins": 2.962496757507324,
      "rewards/rejected": -5.279379844665527,
      "step": 80
    },
    {
      "epoch": 1.71,
      "grad_norm": 86.34373603352554,
      "learning_rate": 2.7440387297912122e-08,
      "logits/chosen": 107.07579040527344,
      "logits/rejected": 111.74522399902344,
      "logps/chosen": -425.4237365722656,
      "logps/rejected": -509.67718505859375,
      "loss": 0.1765,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": -2.749206066131592,
      "rewards/margins": 3.0597147941589355,
      "rewards/rejected": -5.8089213371276855,
      "step": 90
    },
    {
      "epoch": 1.9,
      "grad_norm": 51.66215546933828,
      "learning_rate": 2.27878296044029e-09,
      "logits/chosen": 123.38490295410156,
      "logits/rejected": 113.675537109375,
      "logps/chosen": -439.7268981933594,
      "logps/rejected": -550.8162841796875,
      "loss": 0.1923,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -2.560769557952881,
      "rewards/margins": 3.2135703563690186,
      "rewards/rejected": -5.77433967590332,
      "step": 100
    },
    {
      "epoch": 1.9,
      "eval_logits/chosen": 92.73604583740234,
      "eval_logits/rejected": 86.38631439208984,
      "eval_logps/chosen": -431.5707092285156,
      "eval_logps/rejected": -459.1661682128906,
      "eval_loss": 0.4735770523548126,
      "eval_rewards/accuracies": 0.75,
      "eval_rewards/chosen": -3.4575202465057373,
      "eval_rewards/margins": 1.4980329275131226,
      "eval_rewards/rejected": -4.9555535316467285,
      "eval_runtime": 50.3064,
      "eval_samples_per_second": 14.909,
      "eval_steps_per_second": 0.477,
      "step": 100
    },
    {
      "epoch": 1.97,
      "step": 104,
      "total_flos": 0.0,
      "train_loss": 0.38887147261546207,
      "train_runtime": 1183.8142,
      "train_samples_per_second": 11.404,
      "train_steps_per_second": 0.088
    }
  ],
  "logging_steps": 10,
  "max_steps": 104,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}