Liu-Xiang committed (verified)
Commit 55376b3 · 1 Parent(s): 8d66249

End of training

README.md CHANGED
@@ -1,9 +1,11 @@
 ---
-license: apache-2.0
+base_model: cognitivecomputations/dolphin-2.1-mistral-7b
 library_name: peft
+license: apache-2.0
 tags:
+- trl
+- dpo
 - generated_from_trainer
-base_model: cognitivecomputations/dolphin-2.1-mistral-7b
 model-index:
 - name: doplhin-dpo
   results: []
@@ -44,8 +46,8 @@ The following hyperparameters were used during training:
 
 ### Framework versions
 
-- PEFT 0.10.1.dev0
+- PEFT 0.8.2
 - Transformers 4.38.2
-- Pytorch 2.0.1+cu118
-- Datasets 2.18.0
+- Pytorch 2.2.2+cu121
+- Datasets 2.16.1
 - Tokenizers 0.15.2
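The updated card metadata records the DPO setup (the new trl and dpo tags) and the framework versions actually used for this run. As a minimal sketch of how the resulting adapter could be loaded for inference, assuming the adapter repository id is Liu-Xiang/doplhin-dpo (the repo id is an assumption; only the base model id appears in the card):

```python
# Minimal sketch: stack the DPO-trained LoRA adapter on the base model.
# Assumption: the adapter lives at "Liu-Xiang/doplhin-dpo"; only the base
# model id below is confirmed by the model card.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "cognitivecomputations/dolphin-2.1-mistral-7b"
adapter_id = "Liu-Xiang/doplhin-dpo"  # hypothetical repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)

prompt = "Summarize what DPO fine-tuning changes about a model's behavior."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```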
adapter_config.json CHANGED
@@ -19,13 +19,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "v_proj",
     "k_proj",
+    "up_proj",
     "o_proj",
-    "down_proj",
+    "v_proj",
+    "gate_proj",
     "q_proj",
-    "up_proj",
-    "gate_proj"
+    "down_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_rslora": false
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d3992d689964416450a7c68bbfe0ffe284a5ccca6275371329851d41d496995a
+oid sha256:d20ff35e98f2af2cb38cea995009df76fdd84ee6da458d8498c965811d56c055
 size 1342239008
tokenizer_config.json CHANGED
@@ -1,6 +1,7 @@
 {
   "add_bos_token": true,
   "add_eos_token": false,
+  "add_prefix_space": true,
   "added_tokens_decoder": {
     "0": {
       "content": "<unk>",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8aad1eb9a32a142c5a7fea96a5178705dfeb37949f3492091dca7493c1846af9
-size 4219
+oid sha256:d2377deb56840e02977da981a26a5feb4e68219ed9b863bec1327fa272b7b229
+size 4856
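training_args.bin is the pickled TrainingArguments object the Trainer saves alongside the model, which is why its size shifts when the argument set changes between library versions. A hedged sketch for inspecting it locally; the path is an assumption, and because torch.load unpickles arbitrary objects it should only be used on files you trust:

```python
# Sketch: inspect the serialized training arguments from this checkpoint.
# Assumption: "./doplhin-dpo/training_args.bin" is a trusted local copy.
import torch

args = torch.load("./doplhin-dpo/training_args.bin", weights_only=False)
print(type(args).__name__)
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```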