Weyaxi committed
Commit 388b026
1 Parent(s): 6de808b

Upload 2 files

configs/science_config.yml ADDED
@@ -0,0 +1,117 @@
+ base_model: mistralai/Mistral-7B-v0.1
+ model_type: MistralForCausalLM
+ tokenizer_type: LlamaTokenizer
+ is_mistral_derived_model: true
+
+ load_in_8bit: true
+ load_in_4bit: false
+ strict: false
+
+ datasets:
+   - path: sci-datasets/arc_challange_train_alpaca.json
+     ds_type: json
+     type: alpaca
+
+   - path: sci-datasets/camelai_biology_alpaca.json
+     ds_type: json
+     type: alpaca
+
+   - path: sci-datasets/camelai_chemistry_alpaca.json
+     ds_type: json
+     type: alpaca
+
+   - path: sci-datasets/camelai_physics_alpaca.json
+     ds_type: json
+     type: alpaca
+
+   - path: sci-datasets/openbookqa_alpaca.json
+     ds_type: json
+     type: alpaca
+
+   - path: sci-datasets/reclor_science_alpaca.json
+     ds_type: json
+     type: alpaca
+
+   - path: sci-datasets/scibench_alpaca.json
+     ds_type: json
+     type: alpaca
+
+   - path: sci-datasets/scienceqa_alpaca.json
+     ds_type: json
+     type: alpaca
+
+   - path: sci-datasets/theoremqa_alpaca.json
+     ds_type: json
+     type: alpaca
+
+   - path: sci-datasets/tiger_scienceeval_alpaca.json
+     ds_type: json
+     type: alpaca
+
+ dataset_prepared_path: last_run_prepared
+ val_set_size: 0
+ output_dir: ./science-mistral
+
+ adapter: lora
+ lora_model_dir:
+
+ sequence_len: 8192
+ sample_packing: true
+ pad_to_sequence_len: true
+
+ lora_r: 64
+ lora_alpha: 32
+ lora_dropout: 0.05
+ lora_target_linear: true
+ lora_fan_in_fan_out:
+ lora_target_modules:
+   - gate_proj
+   - down_proj
+   - up_proj
+   - q_proj
+   - v_proj
+   - k_proj
+   - o_proj
+
+ wandb_project: huggingface
+ wandb_entity:
+ wandb_watch:
+ wandb_name:
+ wandb_log_model:
+
+ # change #
+ gradient_accumulation_steps: 4
+ micro_batch_size: 4
+ num_epochs: 1
+ optimizer: adamw_bnb_8bit
+ lr_scheduler: cosine
+ learning_rate: 0.0002
+ # change #
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: true
+ fp16: false
+ tf32: false
+
+ gradient_checkpointing: true
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
+ warmup_steps: 10
+
+ saves_per_epoch: 2
+ debug:
+ deepspeed:
+ weight_decay: 0.1
+ fsdp:
+ fsdp_config:
+ special_tokens:
+   bos_token: "<s>"
+   eos_token: "</s>"
+   unk_token: "<unk>"
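
Every dataset entry above uses `type: alpaca`, so each JSON file is expected to hold instruction-tuning records in the standard Alpaca schema. The dataset files themselves are not part of this commit; the record and filename below are invented purely to illustrate the expected shape:

```python
import json

# Hypothetical example record (not from the actual sci-datasets files).
# Axolotl's `type: alpaca` prompt format expects objects with
# "instruction", "input" (possibly empty), and "output" keys.
record = {
    "instruction": "Which state of matter has a fixed volume but no fixed shape?",
    "input": "",
    "output": "A liquid has a fixed volume but takes the shape of its container.",
}

with open("sci-datasets/example_alpaca.json", "w") as f:
    json.dump([record], f, indent=2)
```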
configs/science_config_qlora.yml ADDED
@@ -0,0 +1,118 @@
+ base_model: mistralai/Mistral-7B-v0.1
+ model_type: MistralForCausalLM
+ tokenizer_type: LlamaTokenizer
+ is_mistral_derived_model: true
+
+ load_in_8bit: false
+ load_in_4bit: true
+ strict: false
+
+ datasets:
+   - path: sci-datasets/arc_challange_train_alpaca.json
+     ds_type: json
+     type: alpaca
+
+   - path: sci-datasets/camelai_biology_alpaca.json
+     ds_type: json
+     type: alpaca
+
+   - path: sci-datasets/camelai_chemistry_alpaca.json
+     ds_type: json
+     type: alpaca
+
+   - path: sci-datasets/camelai_physics_alpaca.json
+     ds_type: json
+     type: alpaca
+
+   - path: sci-datasets/openbookqa_alpaca.json
+     ds_type: json
+     type: alpaca
+
+   - path: sci-datasets/reclor_science_alpaca.json
+     ds_type: json
+     type: alpaca
+
+   - path: sci-datasets/scibench_alpaca.json
+     ds_type: json
+     type: alpaca
+
+   - path: sci-datasets/scienceqa_alpaca.json
+     ds_type: json
+     type: alpaca
+
+   - path: sci-datasets/theoremqa_alpaca.json
+     ds_type: json
+     type: alpaca
+
+   - path: sci-datasets/tiger_scienceeval_alpaca.json
+     ds_type: json
+     type: alpaca
+
+ dataset_prepared_path: last_run_prepared
+ val_set_size: 0
+ output_dir: ./science-mistral
+
+ adapter: qlora
+ lora_model_dir:
+
+ sequence_len: 8192
+ sample_packing: true
+ pad_to_sequence_len: true
+
+ lora_r: 128
+ lora_alpha: 64
+ lora_dropout: 0.05
+ lora_target_linear: true
+ lora_fan_in_fan_out:
+ lora_target_modules:
+   - gate_proj
+   - down_proj
+   - up_proj
+   - q_proj
+   - v_proj
+   - k_proj
+   - o_proj
+
+ wandb_project: huggingface
+ wandb_entity:
+ wandb_watch:
+ wandb_name:
+ wandb_log_model:
+ hub_model_id: Weyaxi/science-mistral
+
+ # change #
+ gradient_accumulation_steps: 12
+ micro_batch_size: 6
+ num_epochs: 2
+ optimizer: adamw_bnb_8bit
+ lr_scheduler: cosine
+ learning_rate: 0.0002
+ # change #
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: true
+ fp16: false
+ tf32: false
+
+ gradient_checkpointing: true
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
+ warmup_steps: 10
+
+ saves_per_epoch: 3
+ debug:
+ deepspeed:
+ weight_decay: 0.1
+ fsdp:
+ fsdp_config:
+ special_tokens:
+   bos_token: "<s>"
+   eos_token: "</s>"
+   unk_token: "<unk>"
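
For reference, the LoRA config trains with an effective batch of micro_batch_size × gradient_accumulation_steps = 4 × 4 = 16 sequences per device per step, while this QLoRA config uses 6 × 12 = 72. With axolotl, either file is typically launched as `accelerate launch -m axolotl.cli.train configs/science_config_qlora.yml`. After training, the adapter saved under `output_dir` can be merged back into the base model; a minimal sketch using the standard transformers + peft APIs, where the two paths come from the configs above and everything else is an assumption about your environment:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Base model named in both configs.
base = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1", torch_dtype=torch.bfloat16
)

# ./science-mistral is the output_dir from the configs; adjust if you
# pointed training elsewhere.
model = PeftModel.from_pretrained(base, "./science-mistral")
model = model.merge_and_unload()  # fold the LoRA weights into the base model

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
model.save_pretrained("science-mistral-merged")      # hypothetical output path
tokenizer.save_pretrained("science-mistral-merged")
```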