Create training_params.json
training_params.json
ADDED
@@ -0,0 +1,45 @@
+{
+    "model": "mistralai/Mixtral-8x7B-v0.1",
+    "project_name": "/tmp/model",
+    "train_split": "train",
+    "valid_split": null,
+    "add_eos_token": true,
+    "block_size": 1024,
+    "model_max_length": 2048,
+    "padding": "right",
+    "trainer": "sft",
+    "use_flash_attention_2": false,
+    "log": "tensorboard",
+    "disable_gradient_checkpointing": false,
+    "logging_steps": -1,
+    "evaluation_strategy": "epoch",
+    "save_total_limit": 1,
+    "save_strategy": "epoch",
+    "auto_find_batch_size": false,
+    "mixed_precision": "fp16",
+    "lr": 3e-05,
+    "epochs": 3,
+    "batch_size": 2,
+    "warmup_ratio": 0.1,
+    "gradient_accumulation": 1,
+    "optimizer": "adamw_torch",
+    "scheduler": "linear",
+    "weight_decay": 0.0,
+    "max_grad_norm": 1.0,
+    "seed": 42,
+    "apply_chat_template": false,
+    "quantization": "int4",
+    "target_modules": "",
+    "merge_adapter": false,
+    "peft": true,
+    "lora_r": 16,
+    "lora_alpha": 16,
+    "lora_dropout": 0.1,
+    "model_ref": null,
+    "dpo_beta": 0.1,
+    "prompt_text_column": "autotrain_prompt",
+    "text_column": "autotrain_text",
+    "rejected_text_column": "autotrain_rejected_text",
+    "push_to_hub": true,
+    "username": "HHazard"
+}
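The file holds AutoTrain-style LLM fine-tuning parameters: the SFT trainer on Mixtral-8x7B with PEFT/LoRA (rank 16, alpha 16) and int4 quantization. As a quick illustration of how such a config might be consumed, here is a minimal, stdlib-only Python sketch that loads and sanity-checks it; the helper name `load_training_params` and the specific checks are illustrative assumptions, not part of AutoTrain's API.

```python
import json

def load_training_params(path="training_params.json"):
    """Hypothetical helper: load the fine-tuning config and sanity-check a few fields."""
    with open(path) as f:
        params = json.load(f)

    # A PEFT/LoRA run needs a positive adapter rank.
    if params.get("peft"):
        assert params["lora_r"] > 0, "LoRA rank must be positive"

    # Effective batch size = per-device batch size * gradient accumulation steps.
    effective_batch = params["batch_size"] * params["gradient_accumulation"]
    print(f"model={params['model']}  trainer={params['trainer']}  "
          f"lr={params['lr']}  effective_batch_size={effective_batch}")
    return params

if __name__ == "__main__":
    load_training_params()
```

With the values above this would report an effective batch size of 2 (batch_size 2 × gradient_accumulation 1).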