noxneural committed on
Commit
fdc05c4
1 Parent(s): 6b1b79b

Upload trained [Ludwig](https://ludwig.ai/latest/) model weights

Browse files
Files changed (3) hide show
  1. README.md +19 -1
  2. adapter_config.json +3 -3
  3. adapter_model.bin +2 -2
README.md CHANGED
@@ -6,7 +6,6 @@ base_model: HuggingFaceH4/zephyr-7b-beta
6
  # Model Card for Model ID
7
 
8
  <!-- Provide a quick summary of what the model is/does. -->
9
- ![Kashaloti-LiliumAlb-Zephyr](https://huggingface.co/noxneural/MistralKashaloti/resolve/main/Kashaloti-LiliumAlb-Zephyr.png)
10
 
11
 
12
 
@@ -236,4 +235,23 @@ The following `bitsandbytes` quantization config was used during training:
236
  ### Framework versions
237
 
238
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
239
  - PEFT 0.6.0.dev0
 
6
  # Model Card for Model ID
7
 
8
  <!-- Provide a quick summary of what the model is/does. -->
 
9
 
10
 
11
 
 
235
  ### Framework versions
236
 
237
 
238
+ - PEFT 0.6.0.dev0
239
+ ## Training procedure
240
+
241
+
242
+ The following `bitsandbytes` quantization config was used during training:
243
+ - quant_method: QuantizationMethod.BITS_AND_BYTES
244
+ - load_in_8bit: True
245
+ - load_in_4bit: False
246
+ - llm_int8_threshold: 6.0
247
+ - llm_int8_skip_modules: None
248
+ - llm_int8_enable_fp32_cpu_offload: False
249
+ - llm_int8_has_fp16_weight: False
250
+ - bnb_4bit_quant_type: nf4
251
+ - bnb_4bit_use_double_quant: True
252
+ - bnb_4bit_compute_dtype: float16
253
+
254
+ ### Framework versions
255
+
256
+
257
  - PEFT 0.6.0.dev0
adapter_config.json CHANGED
@@ -1,7 +1,7 @@
1
  {
2
  "alpha_pattern": {},
3
  "auto_mapping": null,
4
- "base_model_name_or_path": "HuggingFaceH4/zephyr-7b-beta",
5
  "bias": "none",
6
  "fan_in_fan_out": false,
7
  "inference_mode": true,
@@ -16,8 +16,8 @@
16
  "rank_pattern": {},
17
  "revision": null,
18
  "target_modules": [
19
- "v_proj",
20
- "q_proj"
21
  ],
22
  "task_type": "CAUSAL_LM"
23
  }
 
1
  {
2
  "alpha_pattern": {},
3
  "auto_mapping": null,
4
+ "base_model_name_or_path": null,
5
  "bias": "none",
6
  "fan_in_fan_out": false,
7
  "inference_mode": true,
 
16
  "rank_pattern": {},
17
  "revision": null,
18
  "target_modules": [
19
+ "q_proj",
20
+ "v_proj"
21
  ],
22
  "task_type": "CAUSAL_LM"
23
  }
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a8704b2c73849c5b1d78a210fbfeef97e52be7c8bd5638ea0e36d6df1b33dd66
3
- size 13677706
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7eaa51169fb5f5f33b328f26090dcd19a47bde0a9efe64e78856dbbe04a07e7a
3
+ size 888