NovoCode committed 8726f72 (1 parent: bccfb1e)

Create README.md

Files changed (1): README.md (+131, -0)
---
license: apache-2.0
base_model: mistralai/Mistral-7B-v0.1
tags:
- generated_from_trainer
model-index:
- name: out
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

[<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)
<details><summary>See axolotl config</summary>

axolotl version: `0.4.0`
```yaml
base_model: mistralai/Mistral-7B-v0.1
model_type: MistralForCausalLM
tokenizer_type: LlamaTokenizer
is_mistral_derived_model: true

load_in_8bit: false
load_in_4bit: false
strict: false

rl: dpo
datasets:
  - path: NeuralNovel/Neural-DPO
    split: train
    type: chatml.intel
    format: "[INST] {instruction} [/INST]"
    no_input_format: "[INST] {instruction} [/INST]"
dataset_prepared_path:
val_set_size: 0.05
output_dir: ./out

sequence_len: 8192
sample_packing: false
pad_to_sequence_len: true
eval_sample_packing: false

wandb_project:
wandb_entity:
wandb_watch:
wandb_name: Neural-DPO
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 6
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.000005

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 10
evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 0
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
  bos_token: "<s>"
  eos_token: "</s>"
  unk_token: "<unk>"

```

</details><br>

# out

This model is a DPO fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1), trained on the NeuralNovel/Neural-DPO dataset per the axolotl config above.
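
A minimal inference sketch (not part of the original card; the repository id below is a placeholder): it loads the checkpoint as a standard Mistral causal LM with `transformers` and wraps the instruction in the `[INST] {instruction} [/INST]` template from the config above.

```python
# Hypothetical usage sketch: load the checkpoint and generate with the
# "[INST] {instruction} [/INST]" template from the axolotl config above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "path/to/this-repo"  # placeholder: replace with this repository's id or a local path

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # matches the bf16 training setting; use float16/float32 if unsupported
    device_map="auto",
)

instruction = "Summarise the idea behind Direct Preference Optimization in two sentences."
prompt = f"[INST] {instruction} [/INST]"  # template taken from the config's `format` field

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.7)
print(tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```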

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

Training used the NeuralNovel/Neural-DPO dataset in the `chatml.intel` DPO format with a 5% validation split, as specified in the axolotl config above. Further details about the data are not documented here.
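
A small sketch for inspecting that dataset with the `datasets` library (an illustration, not from the original card; its column layout is not documented here, so no field names are assumed):

```python
# Sketch: peek at the preference data referenced in the axolotl config above.
from datasets import load_dataset

dpo_data = load_dataset("NeuralNovel/Neural-DPO", split="train")
print(dpo_data)     # row count and column names
print(dpo_data[0])  # first preference example as a plain dict
```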

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-06
- train_batch_size: 2
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 8
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 10
- training_steps: 801
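
For orientation only, a rough PyTorch/Transformers equivalent of the optimizer and schedule reported above (the actual run went through axolotl with `adamw_bnb_8bit`; this plain-AdamW sketch is an approximation, not the training entry point):

```python
# Approximate re-statement of the reported optimizer/schedule settings.
# The real training used axolotl's adamw_bnb_8bit (8-bit AdamW from bitsandbytes).
import torch
from transformers import get_cosine_schedule_with_warmup

def build_optimizer_and_scheduler(model):
    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=5e-6,             # learning_rate
        betas=(0.9, 0.999),  # Adam betas reported above
        eps=1e-8,            # epsilon reported above
        weight_decay=0.0,    # weight_decay from the config
    )
    scheduler = get_cosine_schedule_with_warmup(
        optimizer,
        num_warmup_steps=10,     # lr_scheduler_warmup_steps
        num_training_steps=801,  # training_steps
    )
    return optimizer, scheduler

# Effective batch size: micro_batch_size (2) x gradient_accumulation_steps (4) = 8,
# matching total_train_batch_size above.
```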

### Training results



### Framework versions

- Transformers 4.38.0.dev0
- Pytorch 2.2.0+cu121
- Datasets 2.17.1
- Tokenizers 0.15.0