Update config.json
config.json (+25 -5)
@@ -1,7 +1,27 @@
 { "_name_or_path": "ybelkada/falcon-7b-sharded-bf16",
-
-"
-
-"
-
+
+"alibi": false, "apply_residual_connection_post_layernorm": false,
+
+"architectures": [ "FalconForCausalLM" ],
+
+"attention_dropout": 0.0, "bias": false, "bos_token_id": 11,
+
+"eos_token_id": 11, "hidden_dropout": 0.0, "hidden_size": 4544, "initializer_range": 0.02,
+
+"layer_norm_epsilon": 1e-05, "max_position_embeddings": 2048, "model_type": "falcon",
+
+"multi_query": true, "n_head": 71, "n_layer": 32, "new_decoder_architecture": false, "num_attention_heads": 71,
+
+"num_hidden_layers": 32, "num_kv_heads": 71, "parallel_attn": true,
+
+"quantization_config": { "bnb_4bit_compute_dtype": "bfloat16", "bnb_4bit_quant_type": "nf4",
+
+"bnb_4bit_use_double_quant": true, "llm_int8_enable_fp32_cpu_offload": false, "llm_int8_has_fp16_weight": false,
+
+"llm_int8_skip_modules": null, "llm_int8_threshold": 6.0,
+
+"load_in_4bit": true, "load_in_8bit": false, "quant_method": "bitsandbytes" },
+
+"rope_scaling": null, "rope_theta": 10000.0, "torch_dtype": "bfloat16", "transformers_version": "4.35.2",
+
 "use_cache": false, "vocab_size": 65024 }
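The added "quantization_config" block records bitsandbytes 4-bit NF4 settings with bfloat16 compute and nested double quantization. As a rough sketch (not part of this commit), a block like this is typically written into config.json when the model is loaded with a BitsAndBytesConfig in transformers and then saved; the output directory name below is made up for illustration.

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# 4-bit NF4 settings matching the keys added to config.json:
# load_in_4bit, bnb_4bit_quant_type, bnb_4bit_use_double_quant, bnb_4bit_compute_dtype.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# Loading with quantization_config attaches these settings to the model's config.
model = AutoModelForCausalLM.from_pretrained(
    "ybelkada/falcon-7b-sharded-bf16",
    quantization_config=bnb_config,
    device_map="auto",
)

# Saving (or pushing to the Hub) serializes the settings into config.json
# under "quantization_config"; "falcon-7b-4bit" is a hypothetical output dir.
model.save_pretrained("falcon-7b-4bit")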