IlyaGusev committed
Commit ff656e6
1 Parent(s): 7bb8205

New params

Files changed (2):
  1. generation_config.json (+4, −4)
  2. training_config.json (+35, −0)
generation_config.json CHANGED
@@ -3,11 +3,11 @@
   "bos_token_id": 1,
   "eos_token_id": 2,
   "temperature": 1.0,
-  "top_p": 0.9,
+  "top_p": 0.95,
   "top_k": 40,
   "do_sample": true,
-  "max_length": 1024,
+  "max_new_tokens": 1024,
   "num_beams": 2,
-  "repetition_penalty": 1.1,
-  "no_repeat_ngram_size": 5
+  "repetition_penalty": 1.05,
+  "no_repeat_ngram_size": 6
 }
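
The change swaps max_length for max_new_tokens (max_length caps the total sequence including the prompt, while max_new_tokens caps only the generated continuation), raises top_p from 0.9 to 0.95, softens repetition_penalty from 1.1 to 1.05, and widens no_repeat_ngram_size from 5 to 6. A minimal sketch of applying these values through transformers follows; the loading code and the prompt are illustrative assumptions, not part of this commit:

# Sketch: apply the updated generation parameters with transformers.
# Model loading and the prompt are assumptions for illustration only.
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

model_path = "models/llama-7b-hf"  # local path taken from training_config.json
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path)

# Mirrors the new generation_config.json values.
gen_config = GenerationConfig(
    bos_token_id=1,
    eos_token_id=2,
    temperature=1.0,
    top_p=0.95,               # was 0.9
    top_k=40,
    do_sample=True,
    max_new_tokens=1024,      # was max_length=1024
    num_beams=2,
    repetition_penalty=1.05,  # was 1.1
    no_repeat_ngram_size=6,   # was 5
)

# Sample prompt (Russian, matching the Saiga template's language).
inputs = tokenizer("Почему трава зелёная?", return_tensors="pt")
output_ids = model.generate(**inputs, generation_config=gen_config)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))

Note that with do_sample=true and num_beams=2, generate() performs beam-search multinomial sampling rather than plain nucleus sampling.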
training_config.json ADDED
@@ -0,0 +1,35 @@
+{
+  "trainer": {
+    "evaluation_strategy": "steps",
+    "per_device_train_batch_size": 12,
+    "per_device_eval_batch_size": 12,
+    "gradient_accumulation_steps": 10,
+    "eval_steps": 50,
+    "save_steps": 50,
+    "logging_steps": 5,
+    "learning_rate": 0.0003,
+    "num_train_epochs": 3,
+    "lr_scheduler_type": "cosine",
+    "warmup_steps": 30,
+    "fp16": true,
+    "bf16": false,
+    "torch_compile": false,
+    "optim": "adamw_torch"
+  },
+  "lora": {
+    "r": 8,
+    "lora_alpha": 16,
+    "lora_dropout": 0.05,
+    "bias": "none",
+    "target_modules": ["q_proj", "v_proj"],
+    "task_type": "CAUSAL_LM"
+  },
+  "load_in_8bit": true,
+  "only_target_loss": true,
+  "model": "chat",
+  "templates_path": "ru_saiga_template.json",
+  "model_name": "models/llama-7b-hf",
+  "model_type": "causal",
+  "max_tokens_count": 1024
+}
+
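
The "trainer" block mirrors transformers.TrainingArguments fields (with an effective batch of 12 × 10 = 120 sequences per device after gradient accumulation), and the "lora" block mirrors peft.LoraConfig. A rough sketch of how these sections could be wired up is below; the training script itself is not part of this commit, output_dir is an assumption, and the repo-specific keys (only_target_loss, model, templates_path, model_type, max_tokens_count) are consumed by the author's own code, so they are skipped here:

# Sketch: map the "trainer" and "lora" sections onto transformers/peft objects.
# The actual training script is not in this commit; output_dir is an assumption.
import json

from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM, TrainingArguments

with open("training_config.json") as f:
    config = json.load(f)

training_args = TrainingArguments(
    output_dir="checkpoints",  # assumed; not set in the config file
    **config["trainer"],
)

lora_config = LoraConfig(**config["lora"])

model = AutoModelForCausalLM.from_pretrained(
    config["model_name"],
    load_in_8bit=config["load_in_8bit"],  # 8-bit weights; requires bitsandbytes
    device_map="auto",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()

Restricting LoRA to q_proj and v_proj with r=8 keeps the trainable parameter count to a small fraction of the 7B base model, which is what makes fine-tuning on top of an 8-bit quantized backbone practical.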