Model save
- README.md +1 -5
- adapter_model.safetensors +1 -1
- all_results.json +5 -5
- train_results.json +5 -5
- trainer_state.json +184 -16
README.md
CHANGED
@@ -2,13 +2,9 @@
 license: mit
 library_name: peft
 tags:
-- alignment-handbook
-- generated_from_trainer
 - trl
 - dpo
 - generated_from_trainer
-datasets:
-- HuggingFaceH4/ultrafeedback_binarized
 base_model: microsoft/phi-2
 model-index:
 - name: phi-2-gpo-iter-0
@@ -20,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # phi-2-gpo-iter-0
 
-This model is a fine-tuned version of [
+This model is a fine-tuned version of [microsoft/phi-2](https://huggingface.co/microsoft/phi-2) on the None dataset.
 
 ## Model description
 
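For context: a minimal sketch of how a PEFT adapter like this one is typically loaded on top of its base model, assuming a repo id of the form `<user>/phi-2-gpo-iter-0` (placeholder) and that `transformers` and `peft` are installed; the README in this commit does not itself include usage code.

```python
# Hypothetical usage sketch: load the DPO LoRA adapter on top of microsoft/phi-2.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "microsoft/phi-2"             # base_model from the README metadata
adapter_id = "<user>/phi-2-gpo-iter-0"  # placeholder: replace with the actual repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16)
model = PeftModel.from_pretrained(base, adapter_id)  # loads adapter_model.safetensors

prompt = "Explain LoRA in one sentence."
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```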
adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:da885cfcacffb590b8bf76f9f633994f048e47a5c39f1b421d6c7f393e16c911
 size 41977616
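A minimal sketch for checking a local download against the LFS pointer above; the oid is the SHA-256 of the file contents and the size is in bytes. The local filename/path is an assumption.

```python
# Verify adapter_model.safetensors against the LFS pointer shown in this commit.
import hashlib
from pathlib import Path

path = Path("adapter_model.safetensors")  # assumed local download location
expected_oid = "da885cfcacffb590b8bf76f9f633994f048e47a5c39f1b421d6c7f393e16c911"
expected_size = 41977616

data = path.read_bytes()
assert len(data) == expected_size, f"size mismatch: {len(data)}"
assert hashlib.sha256(data).hexdigest() == expected_oid, "sha256 mismatch"
print("adapter_model.safetensors matches the LFS pointer")
```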
all_results.json
CHANGED
@@ -1,8 +1,8 @@
 {
-    "epoch": 1.
-    "train_loss": 0.
-    "train_runtime":
+    "epoch": 1.98,
+    "train_loss": 0.010236877437320447,
+    "train_runtime": 1512.0271,
     "train_samples": 30567,
-    "train_samples_per_second": 1.
-    "train_steps_per_second": 0.
+    "train_samples_per_second": 1.323,
+    "train_steps_per_second": 0.082
 }
train_results.json
CHANGED
@@ -1,8 +1,8 @@
 {
-    "epoch": 1.
-    "train_loss": 0.
-    "train_runtime":
+    "epoch": 1.98,
+    "train_loss": 0.010236877437320447,
+    "train_runtime": 1512.0271,
     "train_samples": 30567,
-    "train_samples_per_second": 1.
-    "train_steps_per_second": 0.
+    "train_samples_per_second": 1.323,
+    "train_steps_per_second": 0.082
 }
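Both all_results.json and train_results.json carry the same training summary in this commit; a minimal sketch of reading it back, assuming a local copy of either file.

```python
# Read the training summary metrics written by the run above.
import json

with open("train_results.json") as f:
    results = json.load(f)

print(f"epoch:                    {results['epoch']}")
print(f"train_loss:               {results['train_loss']}")
print(f"train_runtime (s):        {results['train_runtime']}")
print(f"train_samples_per_second: {results['train_samples_per_second']}")
```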
trainer_state.json
CHANGED
@@ -1,21 +1,21 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 1.
+  "epoch": 1.984,
   "eval_steps": 100,
-  "global_step":
+  "global_step": 124,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch":
-      "learning_rate":
-      "logits/chosen": 0.
-      "logits/rejected": 0.
-      "logps/chosen": -
-      "logps/rejected": -
-      "loss": 0.
+      "epoch": 0.02,
+      "learning_rate": 3.846153846153847e-07,
+      "logits/chosen": 0.7132297158241272,
+      "logits/rejected": 0.7902912497520447,
+      "logps/chosen": -182.8937530517578,
+      "logps/rejected": -166.20700073242188,
+      "loss": 0.0102,
       "rewards/accuracies": 0.0,
       "rewards/chosen": 0.0,
       "rewards/margins": 0.0,
@@ -23,17 +23,185 @@
       "step": 1
     },
     {
-      "epoch":
-      "
+      "epoch": 0.16,
+      "learning_rate": 3.846153846153847e-06,
+      "logits/chosen": 0.87026047706604,
+      "logits/rejected": 0.8367093801498413,
+      "logps/chosen": -155.32986450195312,
+      "logps/rejected": -139.0651397705078,
+      "loss": 0.0102,
+      "rewards/accuracies": 0.3611111044883728,
+      "rewards/chosen": 0.0006189693231135607,
+      "rewards/margins": 9.571156988386065e-05,
+      "rewards/rejected": 0.0005232577677816153,
+      "step": 10
+    },
+    {
+      "epoch": 0.32,
+      "learning_rate": 4.951096619903317e-06,
+      "logits/chosen": 0.8673034906387329,
+      "logits/rejected": 0.8491884469985962,
+      "logps/chosen": -148.64974975585938,
+      "logps/rejected": -141.98843383789062,
+      "loss": 0.0103,
+      "rewards/accuracies": 0.33125001192092896,
+      "rewards/chosen": 6.371500785462558e-05,
+      "rewards/margins": -0.00015192512364592403,
+      "rewards/rejected": 0.00021564005874097347,
+      "step": 20
+    },
+    {
+      "epoch": 0.48,
+      "learning_rate": 4.716164218065246e-06,
+      "logits/chosen": 0.8203651309013367,
+      "logits/rejected": 0.8144901990890503,
+      "logps/chosen": -143.59263610839844,
+      "logps/rejected": -129.68614196777344,
+      "loss": 0.0102,
+      "rewards/accuracies": 0.4124999940395355,
+      "rewards/chosen": 0.0005150804063305259,
+      "rewards/margins": 0.00021757767535746098,
+      "rewards/rejected": 0.00029750276007689536,
+      "step": 30
+    },
+    {
+      "epoch": 0.64,
+      "learning_rate": 4.3048902348863116e-06,
+      "logits/chosen": 0.8285233378410339,
+      "logits/rejected": 0.8196646571159363,
+      "logps/chosen": -175.5411376953125,
+      "logps/rejected": -162.92697143554688,
+      "loss": 0.0104,
+      "rewards/accuracies": 0.3187499940395355,
+      "rewards/chosen": -3.423704811211792e-06,
+      "rewards/margins": -0.0005603213212452829,
+      "rewards/rejected": 0.0005568976048380136,
+      "step": 40
+    },
+    {
+      "epoch": 0.8,
+      "learning_rate": 3.7500000000000005e-06,
+      "logits/chosen": 0.8533094525337219,
+      "logits/rejected": 0.8660014271736145,
+      "logps/chosen": -154.22348022460938,
+      "logps/rejected": -141.3745880126953,
+      "loss": 0.0102,
+      "rewards/accuracies": 0.4375,
+      "rewards/chosen": -0.0006007859483361244,
+      "rewards/margins": 9.4042276032269e-05,
+      "rewards/rejected": -0.0006948282243683934,
+      "step": 50
+    },
+    {
+      "epoch": 0.96,
+      "learning_rate": 3.0956464785579125e-06,
+      "logits/chosen": 0.840475857257843,
+      "logits/rejected": 0.835978627204895,
+      "logps/chosen": -147.8980255126953,
+      "logps/rejected": -135.54531860351562,
+      "loss": 0.0102,
+      "rewards/accuracies": 0.3499999940395355,
+      "rewards/chosen": -0.0003634719760157168,
+      "rewards/margins": -3.2313815609086305e-05,
+      "rewards/rejected": -0.00033115819678641856,
+      "step": 60
+    },
+    {
+      "epoch": 1.12,
+      "learning_rate": 2.39389699200963e-06,
+      "logits/chosen": 0.9384347796440125,
+      "logits/rejected": 0.9138472676277161,
+      "logps/chosen": -141.71607971191406,
+      "logps/rejected": -131.02381896972656,
+      "loss": 0.0102,
+      "rewards/accuracies": 0.4000000059604645,
+      "rewards/chosen": -0.0002880954125430435,
+      "rewards/margins": 0.0002658166631590575,
+      "rewards/rejected": -0.0005539121339097619,
+      "step": 70
+    },
+    {
+      "epoch": 1.28,
+      "learning_rate": 1.700590188571887e-06,
+      "logits/chosen": 0.8153573274612427,
+      "logits/rejected": 0.8099604845046997,
+      "logps/chosen": -133.85093688964844,
+      "logps/rejected": -118.66615295410156,
+      "loss": 0.0102,
+      "rewards/accuracies": 0.41874998807907104,
+      "rewards/chosen": -0.00017777756147552282,
+      "rewards/margins": 0.0004668553010560572,
+      "rewards/rejected": -0.000644632731564343,
+      "step": 80
+    },
+    {
+      "epoch": 1.44,
+      "learning_rate": 1.0708929268538034e-06,
+      "logits/chosen": 0.8284982442855835,
+      "logits/rejected": 0.8237984776496887,
+      "logps/chosen": -158.91497802734375,
+      "logps/rejected": -146.9730682373047,
+      "loss": 0.0102,
+      "rewards/accuracies": 0.4437499940395355,
+      "rewards/chosen": -0.000682778307236731,
+      "rewards/margins": 0.0004111085145268589,
+      "rewards/rejected": -0.0010938867926597595,
+      "step": 90
+    },
+    {
+      "epoch": 1.6,
+      "learning_rate": 5.549106142039018e-07,
+      "logits/chosen": 0.8617427945137024,
+      "logits/rejected": 0.8781857490539551,
+      "logps/chosen": -152.7745819091797,
+      "logps/rejected": -141.37179565429688,
+      "loss": 0.0103,
+      "rewards/accuracies": 0.34375,
+      "rewards/chosen": -0.001233941875398159,
+      "rewards/margins": -0.00010122207459062338,
+      "rewards/rejected": -0.0011327198008075356,
+      "step": 100
+    },
+    {
+      "epoch": 1.76,
+      "learning_rate": 1.937002879188285e-07,
+      "logits/chosen": 0.8223294019699097,
+      "logits/rejected": 0.8318236470222473,
+      "logps/chosen": -167.8646697998047,
+      "logps/rejected": -155.11688232421875,
+      "loss": 0.0103,
+      "rewards/accuracies": 0.32499998807907104,
+      "rewards/chosen": -0.001379788271151483,
+      "rewards/margins": -0.0001792705152183771,
+      "rewards/rejected": -0.0012005178723484278,
+      "step": 110
+    },
+    {
+      "epoch": 1.92,
+      "learning_rate": 1.6003680950742728e-08,
+      "logits/chosen": 0.8208626508712769,
+      "logits/rejected": 0.8026873469352722,
+      "logps/chosen": -152.2623748779297,
+      "logps/rejected": -142.6562957763672,
+      "loss": 0.0102,
+      "rewards/accuracies": 0.41874998807907104,
+      "rewards/chosen": -0.0007717030239291489,
+      "rewards/margins": 3.4128395782317966e-05,
+      "rewards/rejected": -0.000805831397883594,
+      "step": 120
+    },
+    {
+      "epoch": 1.98,
+      "step": 124,
       "total_flos": 0.0,
-      "train_loss": 0.
-      "train_runtime":
-      "train_samples_per_second": 1.
-      "train_steps_per_second": 0.
+      "train_loss": 0.010236877437320447,
+      "train_runtime": 1512.0271,
+      "train_samples_per_second": 1.323,
+      "train_steps_per_second": 0.082
     }
   ],
   "logging_steps": 10,
-  "max_steps":
+  "max_steps": 124,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 100,
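A minimal sketch for scanning the log_history above from a local copy of trainer_state.json, printing the DPO loss and reward margin per logged step; the field names follow the entries shown in this diff, and the local path is an assumption.

```python
# Summarize per-step training logs from trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    # Per-step entries carry "loss"; the final summary entry carries "train_loss" instead.
    if "loss" in entry:
        step = entry["step"]
        loss = entry["loss"]
        margin = entry.get("rewards/margins")
        print(f"step {step:>4}  loss {loss:.4f}  rewards/margins {margin}")
```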