BobaZooba committed
Commit e12ae32 · 1 Parent(s): 9d469d9

Training in progress, step 300

adapter_config.json CHANGED
@@ -14,13 +14,13 @@
  "r": 64,
  "revision": null,
  "target_modules": [
-   "gate_proj",
-   "k_proj",
    "q_proj",
-   "up_proj",
-   "down_proj",
    "v_proj",
-   "o_proj"
+   "up_proj",
+   "k_proj",
+   "o_proj",
+   "gate_proj",
+   "down_proj"
  ],
  "task_type": "CAUSAL_LM"
 }
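The updated target_modules list applies the rank-64 LoRA adapter to every attention and MLP projection of the base model, which matches the "raw_lora_target_modules": "all" setting in training_config.json below. A minimal sketch of how such an adapter config could be created and saved with the peft library (the peft usage and the output path are assumptions, not part of this commit):

```python
from peft import LoraConfig

# Rank-64 LoRA over all attention and MLP projections,
# mirroring the updated adapter_config.json in this commit.
lora_config = LoraConfig(
    r=64,
    target_modules=[
        "q_proj",
        "v_proj",
        "up_proj",
        "k_proj",
        "o_proj",
        "gate_proj",
        "down_proj",
    ],
    task_type="CAUSAL_LM",
)

# save_pretrained writes an adapter_config.json in the format shown above.
lora_config.save_pretrained("./outputs/")  # output path is an assumption
```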
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ee2ec909333d3905200788230f22925c9ffad5b1eaa153c525a871e6167df588
-size 6264
+oid sha256:4d0551ad96bb3a47f40b27101c8057f425b3b65b4a89dd1b9631ee395474864c
+size 6200
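training_args.bin is tracked with Git LFS, so the diff shows only the pointer file (object hash and size), not the binary contents. The file is typically the pickled transformers.TrainingArguments object that the Trainer saves alongside checkpoints; a hedged sketch for inspecting it locally (assumes the file has been pulled from LFS and that torch is installed):

```python
import torch

# The pickled TrainingArguments object cannot be loaded with weights_only=True,
# so opt out explicitly (only do this for files you trust).
training_args = torch.load("training_args.bin", weights_only=False)
print(training_args)
```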
training_config.json CHANGED
@@ -59,7 +59,7 @@
  "raw_lora_target_modules": "all",
  "output_dir": "./outputs/",
  "per_device_train_batch_size": 8,
- "do_eval": false,
+ "do_eval": true,
  "per_device_eval_batch_size": null,
  "gradient_accumulation_steps": 2,
  "eval_accumulation_steps": null,