Abinesh committed on
Commit 9a97919 · 1 Parent(s): a7997c2

Delete llama_2_adapter

llama_2_adapter/README.md DELETED
@@ -1,21 +0,0 @@
- ---
- library_name: peft
- ---
- ## Training procedure
-
-
- The following `bitsandbytes` quantization config was used during training:
- - quant_method: QuantizationMethod.BITS_AND_BYTES
- - load_in_8bit: False
- - load_in_4bit: True
- - llm_int8_threshold: 6.0
- - llm_int8_skip_modules: None
- - llm_int8_enable_fp32_cpu_offload: False
- - llm_int8_has_fp16_weight: False
- - bnb_4bit_quant_type: nf4
- - bnb_4bit_use_double_quant: True
- - bnb_4bit_compute_dtype: bfloat16
- ### Framework versions
-
-
- - PEFT 0.4.0.dev0
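The deleted README records the `bitsandbytes` quantization settings used while training the adapter. For reference, here is a minimal sketch of how an equivalent 4-bit NF4 configuration could be built and used to load the base model with `transformers`; the variable names and the `device_map="auto"` placement are illustrative assumptions, not part of the original training setup.

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the quantization config listed in the deleted README:
# 4-bit NF4 weights, double quantization, bfloat16 compute dtype.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# Assumed usage: load the base model named in adapter_config.json with
# these settings (requires access to the gated Llama 2 weights).
base_model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-chat-hf",
    quantization_config=bnb_config,
    device_map="auto",
)
```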
llama_2_adapter/adapter_config.json DELETED
@@ -1,21 +0,0 @@
- {
- "base_model_name_or_path": "meta-llama/Llama-2-7b-chat-hf",
- "bias": "none",
- "fan_in_fan_out": false,
- "inference_mode": true,
- "init_lora_weights": true,
- "layers_pattern": null,
- "layers_to_transform": null,
- "lora_alpha": 32,
- "lora_dropout": 0.05,
- "modules_to_save": null,
- "peft_type": "LORA",
- "r": 8,
- "revision": null,
- "target_modules": [
- "q_proj",
- "k_proj",
- "v_proj"
- ],
- "task_type": "CAUSAL_LM"
- }
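The deleted adapter_config.json describes a LoRA adapter with rank 8, alpha 32, dropout 0.05, applied to the q/k/v projection modules of meta-llama/Llama-2-7b-chat-hf. Below is a minimal sketch of recreating an equivalent configuration with `peft`; the adapter was saved with PEFT 0.4.0.dev0, so exact field defaults may differ slightly across PEFT versions.

```python
from peft import LoraConfig

# Recreates the LoRA settings from the deleted adapter_config.json.
lora_config = LoraConfig(
    r=8,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "k_proj", "v_proj"],
    task_type="CAUSAL_LM",
)

# Assumed usage with a loaded base model (e.g. the quantized model from
# the previous sketch):
#   from peft import get_peft_model
#   model = get_peft_model(base_model, lora_config)
#   model.print_trainable_parameters()
```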
llama_2_adapter/adapter_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:715ae257199edde71ec5489cd3df29634e2d6e49de41d19c85d65a8d6d53a545
- size 25234701
llama_2_adapter/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:7228d256901ac224db1b4d229c9c22d5c5a0db9d57b13da26daf55eb856deda8
- size 4027
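adapter_model.bin and training_args.bin are Git LFS pointer files, so the diff shows only the pointer metadata (spec version, SHA-256 object id, and size in bytes) rather than the binary contents. As a hedged sketch, the adapter could be restored from the parent commit and attached to the base model as below; the local path and the use of parent revision a7997c2 are assumptions based on this commit's header.

```python
# Restore the deleted directory from the parent commit first, e.g.:
#   git checkout a7997c2 -- llama_2_adapter
from transformers import AutoModelForCausalLM
from peft import PeftModel

base_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")

# Loads the LoRA weights stored in llama_2_adapter/adapter_model.bin on top
# of the base model (path assumes the restored local directory).
model = PeftModel.from_pretrained(base_model, "llama_2_adapter")
```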