Abhay06102003 committed on
Commit edea6cb
1 Parent(s): 4496534

Upload model

Files changed (2)
  1. adapter_config.json +9 -9
  2. adapter_model.safetensors +2 -2
adapter_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "unsloth/mistral-7b-v0.3-bnb-4bit",
+  "base_model_name_or_path": "google/flan-t5-large",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -10,22 +10,22 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
-  "lora_alpha": 64,
-  "lora_dropout": 0.01,
+  "lora_alpha": 16,
+  "lora_dropout": 0,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 16,
+  "r": 8,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "k_proj",
-    "q_proj",
-    "o_proj",
-    "v_proj"
+    "q",
+    "v",
+    "o",
+    "k"
   ],
-  "task_type": "CAUSAL_LM",
+  "task_type": "SEQ_2_SEQ_LM",
   "use_dora": false,
   "use_rslora": false
 }
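In PEFT terms, this commit swaps the adapter from a causal-LM LoRA on unsloth/mistral-7b-v0.3-bnb-4bit to a seq2seq LoRA on google/flan-t5-large, with rank 8, alpha 16, no dropout, and the T5 attention projections (q, k, v, o) as target modules. A minimal sketch of building an equivalent adapter with the peft library is shown below; the actual training code is not part of this commit, so everything beyond the values in adapter_config.json is an assumption for illustration.

# Sketch: recreating the updated adapter configuration with PEFT.
# Hyperparameters mirror the adapter_config.json above; the training
# setup itself is not included in this commit and is illustrative.
from transformers import AutoModelForSeq2SeqLM
from peft import LoraConfig, TaskType, get_peft_model

base_model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-large")

lora_config = LoraConfig(
    task_type=TaskType.SEQ_2_SEQ_LM,      # "task_type": "SEQ_2_SEQ_LM"
    r=8,                                   # "r": 8
    lora_alpha=16,                         # "lora_alpha": 16
    lora_dropout=0.0,                      # "lora_dropout": 0
    bias="none",                           # "bias": "none"
    target_modules=["q", "v", "o", "k"],   # T5 attention projection names
)

peft_model = get_peft_model(base_model, lora_config)
peft_model.print_trainable_parameters()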
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fdb9affd53818b209f0d1bba091fd9d64d7e080d739f08356e1203f21723e305
-size 54560368
+oid sha256:7c4041d0f5d91b7461c0b6a4ba79c6b66eee85088b504f30753e3fcf00a2dca2
+size 9518448
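The adapter_model.safetensors change replaces the Git LFS pointer, shrinking the weight file from roughly 54.6 MB to roughly 9.5 MB, consistent with the smaller rank-8 adapter on a smaller base model. A hedged sketch of loading the uploaded adapter from the Hub follows; the repository id is a placeholder, since it is not shown in this commit.

# Sketch: loading the uploaded LoRA adapter on top of the base model.
# "Abhay06102003/<repo-name>" is a placeholder; the actual repository id
# does not appear in this diff.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-large")
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-large")

# PeftModel reads adapter_config.json and adapter_model.safetensors
# from the adapter repository.
model = PeftModel.from_pretrained(base, "Abhay06102003/<repo-name>")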