lewtun (HF staff) committed
Commit f87bd26
1 Parent(s): 8f71d78

Model save

README.md CHANGED
@@ -2,13 +2,9 @@
  license: apache-2.0
  base_model: alignment-handbook/zephyr-7b-sft-full
  tags:
- - alignment-handbook
- - generated_from_trainer
  - trl
  - dpo
  - generated_from_trainer
- datasets:
- - HuggingFaceH4/ultrafeedback_binarized
  model-index:
  - name: zephyr-7b-dpo-full
    results: []
@@ -19,7 +15,7 @@ should probably proofread and complete it, then remove this comment. -->
 
  # zephyr-7b-dpo-full
 
- This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on the HuggingFaceH4/ultrafeedback_binarized dataset.
+ This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on the None dataset.
  It achieves the following results on the evaluation set:
  - Loss: 0.5028
  - Rewards/chosen: -0.9469
@@ -64,12 +60,12 @@ The following hyperparameters were used during training:
 
  ### Training results
 
- | Training Loss | Epoch | Step | Logits/chosen | Logits/rejected | Logps/chosen | Logps/rejected | Validation Loss | Rewards/accuracies | Rewards/chosen | Rewards/margins | Rewards/rejected |
- |:-------------:|:-----:|:----:|:-------------:|:---------------:|:------------:|:--------------:|:---------------:|:------------------:|:--------------:|:---------------:|:----------------:|
- | 0.5545 | 0.21 | 100 | -1.3212 | -1.0287 | -312.0799 | -374.3159 | 0.5658 | 0.7188 | -0.4953 | 0.6264 | -1.1217 |
- | 0.5026 | 0.42 | 200 | -0.1773 | 0.5190 | -352.4985 | -439.3264 | 0.5202 | 0.7461 | -0.8995 | 0.8723 | -1.7718 |
- | 0.5106 | 0.63 | 300 | 0.0862 | 0.9099 | -342.0043 | -424.9976 | 0.5104 | 0.7656 | -0.7946 | 0.8339 | -1.6285 |
- | 0.4859 | 0.84 | 400 | 0.7818 | 1.7438 | -360.3139 | -457.9452 | 0.5031 | 0.7578 | -0.9777 | 0.9803 | -1.9580 |
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.5545 | 0.21 | 100 | 0.5658 | -0.4953 | -1.1217 | 0.7188 | 0.6264 | -374.3159 | -312.0799 | -1.0287 | -1.3212 |
+ | 0.5026 | 0.42 | 200 | 0.5202 | -0.8995 | -1.7718 | 0.7461 | 0.8723 | -439.3264 | -352.4985 | 0.5190 | -0.1773 |
+ | 0.5106 | 0.63 | 300 | 0.5104 | -0.7946 | -1.6285 | 0.7656 | 0.8339 | -424.9976 | -342.0043 | 0.9099 | 0.0862 |
+ | 0.4859 | 0.84 | 400 | 0.5031 | -0.9777 | -1.9580 | 0.7578 | 0.9803 | -457.9452 | -360.3139 | 1.7438 | 0.7818 |
 
 
  ### Framework versions
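
The table rewrite above is a pure column reordering; every value carries over unchanged. One quick consistency check, since TRL's DPO metrics define the reward margin as the gap between chosen and rejected rewards, is to recompute that column from the other two. A minimal sketch in Python, with the values copied from the table rows (the script is illustrative and not part of the commit):

```python
# Sanity check: rewards/margins should equal rewards/chosen - rewards/rejected.
# Values below are copied from the "Training results" table in the README diff.
rows = [
    # (step, rewards/chosen, rewards/rejected, rewards/margins)
    (100, -0.4953, -1.1217, 0.6264),
    (200, -0.8995, -1.7718, 0.8723),
    (300, -0.7946, -1.6285, 0.8339),
    (400, -0.9777, -1.9580, 0.9803),
]

for step, chosen, rejected, margin in rows:
    assert abs((chosen - rejected) - margin) < 1e-3, f"mismatch at step {step}"
print("margins consistent with chosen - rejected")
```

The same identity holds for the evaluation summary in the JSON files below (-0.9469 - (-1.8932) ≈ 0.9463).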
all_results.json CHANGED
@@ -9,13 +9,13 @@
   "eval_rewards/chosen": -0.9468507170677185,
   "eval_rewards/margins": 0.946345865726471,
   "eval_rewards/rejected": -1.8931965827941895,
- "eval_runtime": 87.2102,
+ "eval_runtime": 88.2311,
   "eval_samples": 2000,
- "eval_samples_per_second": 22.933,
- "eval_steps_per_second": 0.367,
- "train_loss": 0.07998354963677698,
- "train_runtime": 779.5821,
+ "eval_samples_per_second": 22.668,
+ "eval_steps_per_second": 0.363,
+ "train_loss": 0.5366686437918052,
+ "train_runtime": 5259.7251,
   "train_samples": 61135,
- "train_samples_per_second": 78.42,
- "train_steps_per_second": 0.613
+ "train_samples_per_second": 11.623,
+ "train_steps_per_second": 0.091
  }
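
The runtime and throughput fields in all_results.json move together because the per-second numbers are derived from the sample counts and runtimes. A small arithmetic check against the updated values (copied from the JSON above; assumes the usual samples / runtime definition of these fields):

```python
# Values copied from the updated all_results.json in this commit.
eval_samples, eval_runtime = 2000, 88.2311
train_samples, train_runtime = 61135, 5259.7251

print(round(eval_samples / eval_runtime, 3))    # -> 22.668, matches eval_samples_per_second
print(round(train_samples / train_runtime, 3))  # -> 11.623, matches train_samples_per_second
```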
eval_results.json CHANGED
@@ -9,8 +9,8 @@
   "eval_rewards/chosen": -0.9468507170677185,
   "eval_rewards/margins": 0.946345865726471,
   "eval_rewards/rejected": -1.8931965827941895,
- "eval_runtime": 87.2102,
+ "eval_runtime": 88.2311,
   "eval_samples": 2000,
- "eval_samples_per_second": 22.933,
- "eval_steps_per_second": 0.367
+ "eval_samples_per_second": 22.668,
+ "eval_steps_per_second": 0.363
  }
runs/Jan09_05-05-41_ip-26-0-161-142/events.out.tfevents.1704776777.ip-26-0-161-142.2982297.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1068be05be4b4c4c83692d6d4d75f6deee09d6a702a2c7606b08376b742f96aa
- size 33330
+ oid sha256:45086008b51ce161ef9703ada557dbf4c3cb439d62224dfdcab11d57f3f06d19
+ size 38122
runs/Jan09_05-05-41_ip-26-0-161-142/events.out.tfevents.1704782125.ip-26-0-161-142.2982297.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a80cc3e09600ca8513a0fa3bf27018544c48561c7d7ab1eb12474aff0b3f0a2
+ size 828
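
The two tfevents entries are Git LFS pointer files rather than the TensorBoard logs themselves: three text lines recording the pointer spec version, a sha256 object id, and the object size in bytes. A minimal parser for that pointer format is sketched below; the parse_lfs_pointer helper is hypothetical, shown only to illustrate the layout of the lines in the diff:

```python
def parse_lfs_pointer(text: str) -> dict:
    """Parse a Git LFS pointer file: one 'key value' pair per line."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

# The new pointer committed for the second tfevents file above.
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:4a80cc3e09600ca8513a0fa3bf27018544c48561c7d7ab1eb12474aff0b3f0a2
size 828"""

info = parse_lfs_pointer(pointer)
print(info["oid"])        # sha256:4a80cc3e... (object hash stored in LFS)
print(int(info["size"]))  # 828 bytes
```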
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
    "epoch": 1.0,
- "train_loss": 0.07998354963677698,
- "train_runtime": 779.5821,
+ "train_loss": 0.5366686437918052,
+ "train_runtime": 5259.7251,
    "train_samples": 61135,
- "train_samples_per_second": 78.42,
- "train_steps_per_second": 0.613
+ "train_samples_per_second": 11.623,
+ "train_steps_per_second": 0.091
  }
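
The replaced throughput numbers in train_results.json appear to come from an earlier, much shorter run (779.6 s of training), superseded here by the full run (5259.7 s); both sets are internally consistent with the samples / runtime arithmetic. A quick check with the numbers from the diff above (illustrative only):

```python
train_samples = 61135  # from train_results.json

# Old (replaced) runtime vs. new runtime, each with its reported samples/sec.
for runtime, reported in [(779.5821, 78.42), (5259.7251, 11.623)]:
    print(round(train_samples / runtime, 3), "vs reported", reported)
# -> 78.42 vs reported 78.42
# -> 11.623 vs reported 11.623
```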
trainer_state.json CHANGED
@@ -173,9 +173,9 @@
   "eval_rewards/chosen": -0.49532508850097656,
   "eval_rewards/margins": 0.6263692378997803,
   "eval_rewards/rejected": -1.1216944456100464,
- "eval_runtime": 88.7193,
- "eval_samples_per_second": 22.543,
- "eval_steps_per_second": 0.361,
+ "eval_runtime": 87.9008,
+ "eval_samples_per_second": 22.753,
+ "eval_steps_per_second": 0.364,
   "step": 100
  },
  {
@@ -329,9 +329,9 @@
   "eval_rewards/chosen": -0.899510383605957,
   "eval_rewards/margins": 0.8722902536392212,
   "eval_rewards/rejected": -1.7718006372451782,
- "eval_runtime": 88.5484,
- "eval_samples_per_second": 22.587,
- "eval_steps_per_second": 0.361,
+ "eval_runtime": 87.889,
+ "eval_samples_per_second": 22.756,
+ "eval_steps_per_second": 0.364,
   "step": 200
  },
  {
@@ -485,9 +485,9 @@
   "eval_rewards/chosen": -0.7945692539215088,
   "eval_rewards/margins": 0.8339425921440125,
   "eval_rewards/rejected": -1.628511905670166,
- "eval_runtime": 88.6653,
- "eval_samples_per_second": 22.557,
- "eval_steps_per_second": 0.361,
+ "eval_runtime": 88.1333,
+ "eval_samples_per_second": 22.693,
+ "eval_steps_per_second": 0.363,
   "step": 300
  },
  {
@@ -641,8 +641,8 @@
   "eval_rewards/chosen": -0.9776647090911865,
   "eval_rewards/margins": 0.9803228974342346,
   "eval_rewards/rejected": -1.9579875469207764,
- "eval_runtime": 88.4827,
- "eval_samples_per_second": 22.603,
+ "eval_runtime": 88.4314,
+ "eval_samples_per_second": 22.616,
   "eval_steps_per_second": 0.362,
   "step": 400
  },
@@ -748,10 +748,10 @@
   "epoch": 1.0,
   "step": 478,
   "total_flos": 0.0,
- "train_loss": 0.07998354963677698,
- "train_runtime": 779.5821,
- "train_samples_per_second": 78.42,
- "train_steps_per_second": 0.613
+ "train_loss": 0.5366686437918052,
+ "train_runtime": 5259.7251,
+ "train_samples_per_second": 11.623,
+ "train_steps_per_second": 0.091
  }
  ],
  "logging_steps": 10,