gmazur591 committed
Commit 42c9660
Parent: 2737a34

Upload model

Files changed (3):
  1. README.md +1 -20
  2. adapter_config.json +4 -4
  3. adapter_model.safetensors +2 -2
README.md CHANGED
@@ -1,6 +1,6 @@
  ---
  library_name: peft
- base_model: vilsonrodrigues/falcon-7b-instruct-sharded
+ base_model: tiiuae/falcon-7b-instruct
  ---
 
  # Model Card for Model ID
@@ -236,23 +236,4 @@ The following `bitsandbytes` quantization config was used during training:
  ### Framework versions
 
 
- - PEFT 0.6.3.dev0
- ## Training procedure
-
-
- The following `bitsandbytes` quantization config was used during training:
- - quant_method: bitsandbytes
- - load_in_8bit: False
- - load_in_4bit: True
- - llm_int8_threshold: 6.0
- - llm_int8_skip_modules: None
- - llm_int8_enable_fp32_cpu_offload: False
- - llm_int8_has_fp16_weight: False
- - bnb_4bit_quant_type: nf4
- - bnb_4bit_use_double_quant: True
- - bnb_4bit_compute_dtype: bfloat16
-
- ### Framework versions
-
-
  - PEFT 0.6.3.dev0
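For reference, the `bitsandbytes` settings listed in the removed README section map directly onto a `transformers` `BitsAndBytesConfig`, as in the sketch below. This is a minimal reconstruction from the listed values, not the training script from this repository.

```python
import torch
from transformers import BitsAndBytesConfig

# 4-bit NF4 quantization with double quantization and bfloat16 compute,
# reconstructed from the values in the removed README section.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=False,
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
```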
adapter_config.json CHANGED
@@ -1,18 +1,18 @@
  {
  "alpha_pattern": {},
  "auto_mapping": null,
- "base_model_name_or_path": "vilsonrodrigues/falcon-7b-instruct-sharded",
+ "base_model_name_or_path": "tiiuae/falcon-7b-instruct",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layers_pattern": null,
  "layers_to_transform": null,
- "lora_alpha": 32,
- "lora_dropout": 0.05,
+ "lora_alpha": 16,
+ "lora_dropout": 0.01,
  "modules_to_save": null,
  "peft_type": "LORA",
- "r": 16,
+ "r": 4,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:22f4ac6e844b92023436cf7e9485fb3a4a0b998602c4c94b8063a43de4913f73
- size 18883912
+ oid sha256:d4ba64dff3ac2b983fe4272f17c2b711bcdf1a14490f853f98f80df6840c1e40
+ size 4727992
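The adapter file shrinks from 18,883,912 to 4,727,992 bytes, roughly 4x, which is consistent with the LoRA rank dropping from 16 to 4: LoRA parameter count scales linearly with `r`. A minimal loading sketch follows; the adapter repository id is not shown on this page, so the placeholder below must be replaced with the actual repo.

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Same 4-bit settings as the earlier sketch, repeated for self-containment.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# Load the updated base model in 4-bit.
base = AutoModelForCausalLM.from_pretrained(
    "tiiuae/falcon-7b-instruct",
    quantization_config=bnb_config,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b-instruct")

# Attach the LoRA adapter. "<user>/<adapter-repo>" is a placeholder; the
# commit page does not show the full repository id.
model = PeftModel.from_pretrained(base, "<user>/<adapter-repo>")
```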