Upload config.json with huggingface_hub
config.json CHANGED (+22 -27)
@@ -1,29 +1,24 @@
 {
-  … (old lines 2-23: previous contents, not recoverable from the page extract)
-    "q_proj"
-  ],
-  "task_type": "CAUSAL_LM",
-  "use_dora": false,
-  "use_rslora": false
+  "dataset_name": "data/packaged_pretrain_dataset.parquet",
+  "num_proc": 1,
+  "max_seq_length": 32,
+  "seed": 0,
+  "optim": "adamw_torch",
+  "max_steps": 60,
+  "per_device_train_batch_size": 2,
+  "learning_rate": 1e-05,
+  "weight_decay": 0,
+  "warmup_steps": 10,
+  "lr_scheduler_type": "linear",
+  "gradient_checkpointing": true,
+  "dataloader_num_workers": 2,
+  "bf16": true,
+  "gradient_accumulation_steps": 1,
+  "logging_steps": 3,
+  "report_to": [],
+  "save_strategy": "steps",
+  "save_steps": 3,
+  "save_total_limit": 1,
+  "push_to_hub": true,
+  "hub_model_id": "hienbm/psychology-llama3.1-8B"
 }
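
The commit title is the default message that huggingface_hub generates when a file is pushed with HfApi.upload_file. A minimal sketch of the call that would produce a commit like this one; the local path is an assumption, and the repo id is taken from the hub_model_id field in the uploaded config:

from huggingface_hub import HfApi

api = HfApi()  # picks up the token from the local login cache or HF_TOKEN
api.upload_file(
    path_or_fileobj="config.json",            # local file to upload (assumed path)
    path_in_repo="config.json",               # destination path inside the repo
    repo_id="hienbm/psychology-llama3.1-8B",  # repo id per hub_model_id above
)
# commit_message defaults to "Upload config.json with huggingface_hub",
# matching the title of this commit.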
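Most keys in the new config map one-to-one onto transformers.TrainingArguments fields; dataset_name, num_proc, and max_seq_length do not, and presumably drive data preparation instead. A hedged sketch of how such a file could be consumed; the key split and the output_dir are assumptions, not taken from this repo:

import json
from transformers import TrainingArguments

with open("config.json") as f:
    cfg = json.load(f)

# Keys that are not TrainingArguments fields (assumed to configure data prep).
dataset_name = cfg.pop("dataset_name")
num_proc = cfg.pop("num_proc")
max_seq_length = cfg.pop("max_seq_length")

# The remaining keys are all valid TrainingArguments parameters.
# Note: bf16=True requires hardware with bfloat16 support (e.g. Ampere GPUs).
args = TrainingArguments(output_dir="./output", **cfg)
print(args.max_steps, args.hub_model_id)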