TeetouchQQ committed on
Commit 9d5b554
Parent: ead4922

Upload cfg.yaml

Files changed (1)
cfg.yaml +120 -0
cfg.yaml ADDED
@@ -0,0 +1,120 @@
+architecture:
+    backbone_dtype: float16
+    force_embedding_gradients: false
+    gradient_checkpointing: true
+    intermediate_dropout: 0.0
+    pretrained: true
+    pretrained_weights: ''
+augmentation:
+    random_parent_probability: 0.0
+    skip_parent_probability: 0.0
+    token_mask_probability: 0.0
+dataset:
+    add_eos_token_to_answer: true
+    add_eos_token_to_prompt: true
+    add_eos_token_to_system: true
+    answer_column: output
+    chatbot_author: H2O.ai
+    chatbot_name: h2oGPT
+    data_sample: 1.0
+    data_sample_choice:
+    - Train
+    - Validation
+    limit_chained_samples: false
+    mask_prompt_labels: true
+    parent_id_column: None
+    personalize: false
+    prompt_column:
+    - instruction
+    - input
+    system_column: None
+    text_answer_separator: <|answer|>
+    text_prompt_start: <|prompt|>
+    text_system_start: <|system|>
+    train_dataframe: /tf/project/h2o-llmstudio/data/user/train_h2oV1/train_h2oV1.csv
+    validation_dataframe: None
+    validation_size: 0.1
+    validation_strategy: automatic
+environment:
+    compile_model: false
+    find_unused_parameters: false
+    gpus:
+    - '0'
+    - '1'
+    huggingface_branch: main
+    mixed_precision: true
+    number_of_workers: 8
+    seed: -1
+    trust_remote_code: true
+    use_fsdp: false
+experiment_name: topaz-coot
+hf:
+    account_name: ''
+    model_name: ''
+llm_backbone: tiiuae/falcon-7b
+logging:
+    logger: None
+    neptune_project: ''
+    number_of_texts: 10
+output_directory: /tf/project/h2o-llmstudio/output/user/topaz-coot/
+prediction:
+    batch_size_inference: 0
+    do_sample: false
+    max_length_inference: 256
+    metric: Perplexity
+    metric_gpt_model: gpt-3.5-turbo-0301
+    min_length_inference: 2
+    num_beams: 1
+    num_history: 4
+    repetition_penalty: 1.2
+    stop_tokens: ''
+    temperature: 0.3
+    top_k: 0
+    top_p: 1.0
+problem_type: text_causal_language_modeling
+tokenizer:
+    add_prefix_space: false
+    add_prompt_answer_tokens: false
+    max_length: 512
+    max_length_answer: 256
+    max_length_prompt: 512
+    padding_quantile: 1.0
+    use_fast: true
+training:
+    adaptive_kl_control: true
+    advantages_gamma: 0.99
+    advantages_lambda: 0.95
+    batch_size: 4
+    differential_learning_rate: 1.0e-05
+    differential_learning_rate_layers: []
+    drop_last_batch: true
+    epochs: 1
+    evaluate_before_training: false
+    evaluation_epochs: 1.0
+    grad_accumulation: 4
+    gradient_clip: 0.0
+    initial_kl_coefficient: 0.2
+    kl_horizon: 10000
+    kl_target: 6.0
+    learning_rate: 0.0001
+    lora: true
+    lora_alpha: 32
+    lora_dropout: 0.05
+    lora_r: 16
+    lora_target_modules: ''
+    loss_function: TokenAveragedCrossEntropy
+    offload_reward_model: false
+    optimizer: AdamW
+    ppo_batch_size: 1
+    ppo_clip_policy: 0.2
+    ppo_clip_value: 0.2
+    ppo_epochs: 4
+    ppo_generate_temperature: 1.0
+    reward_model: OpenAssistant/reward-model-deberta-v3-large-v2
+    save_best_checkpoint: false
+    scaling_factor_value_loss: 0.1
+    schedule: Cosine
+    train_validation_data: false
+    use_rlhf: false
+    warmup_epochs: 0.0
+    weight_decay: 0.0
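
For quick inspection, the uploaded config can be loaded with PyYAML; a minimal sketch (note that the unquoted None values in this file parse as the string 'None', not as YAML null):

```python
import yaml

# Load the experiment config uploaded in this commit.
with open("cfg.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["llm_backbone"])                 # tiiuae/falcon-7b
print(cfg["training"]["lora_r"])           # 16
print(cfg["dataset"]["prompt_column"])     # ['instruction', 'input']
print(cfg["dataset"]["parent_id_column"])  # 'None' (a string, not a null)
```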
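The dataset section fixes the prompt template: the two prompt columns (instruction, input) are wrapped with <|prompt|>, the answer column with <|answer|>, and an EOS token is appended to both. Roughly what one training row becomes (a sketch only; the newline join and Falcon's <|endoftext|> EOS token are assumptions, and the exact concatenation lives in H2O LLM Studio's data pipeline):

```python
EOS = "<|endoftext|>"  # assumption: EOS token of the tiiuae/falcon-7b tokenizer

def build_sample(instruction: str, input_text: str, output: str) -> str:
    """Approximate the text a single CSV row is turned into for causal LM training."""
    # prompt_column: [instruction, input] -> one prompt body (newline join is an assumption)
    prompt = f"<|prompt|>{instruction}\n{input_text}{EOS}"  # add_eos_token_to_prompt: true
    answer = f"<|answer|>{output}{EOS}"                     # add_eos_token_to_answer: true
    return prompt + answer
```

Since mask_prompt_labels is true, the loss is computed only on the answer tokens; the prompt portion is masked out of the labels.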
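On the model side, the architecture and training sections describe a float16 Falcon-7B backbone with gradient checkpointing and LoRA adapters (r=16, alpha=32, dropout 0.05). lora_target_modules is empty, so LLM Studio falls back to its defaults for the architecture; a rough peft equivalent, where the explicit query_key_value target is an assumption for Falcon rather than something read from this file:

```python
import torch
from transformers import AutoModelForCausalLM
from peft import LoraConfig, TaskType, get_peft_model

model = AutoModelForCausalLM.from_pretrained(
    "tiiuae/falcon-7b",
    torch_dtype=torch.float16,  # architecture.backbone_dtype: float16
    trust_remote_code=True,     # environment.trust_remote_code: true
)
model.gradient_checkpointing_enable()  # architecture.gradient_checkpointing: true

lora_config = LoraConfig(
    r=16,               # training.lora_r
    lora_alpha=32,      # training.lora_alpha
    lora_dropout=0.05,  # training.lora_dropout
    bias="none",
    task_type=TaskType.CAUSAL_LM,
    target_modules=["query_key_value"],  # assumption for Falcon; cfg leaves this empty
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
```

The PPO/KL keys further down (adaptive_kl_control through scaling_factor_value_loss) are inert in this run, since use_rlhf is false.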
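The effective batch size follows from the training and environment sections: 4 per GPU, times 4 gradient-accumulation steps, times 2 GPUs, gives 32 samples per optimizer step (assuming batch_size is per device, as in a data-parallel setup):

```python
batch_size = 4         # training.batch_size (per GPU, assumption)
grad_accumulation = 4  # training.grad_accumulation
num_gpus = 2           # environment.gpus: ['0', '1']

effective_batch_size = batch_size * grad_accumulation * num_gpus
print(effective_batch_size)  # 32
```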
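The prediction block maps onto a standard transformers generate call. With do_sample: false and num_beams: 1, decoding is greedy, so the temperature, top_k, and top_p values are effectively inert. A sketch reusing `model` from the LoRA example above (mapping max_length_inference and min_length_inference to max_new_tokens and min_new_tokens is an assumption):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b", trust_remote_code=True)
inputs = tokenizer("<|prompt|>What is H2O LLM Studio?<|endoftext|><|answer|>",
                   return_tensors="pt")

output_ids = model.generate(   # `model` from the LoRA sketch above
    **inputs,
    max_new_tokens=256,        # prediction.max_length_inference
    min_new_tokens=2,          # prediction.min_length_inference
    do_sample=False,           # greedy decoding
    num_beams=1,
    repetition_penalty=1.2,    # prediction.repetition_penalty
)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```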