Kolibri753 committed on
Commit d4a840a • 1 Parent(s): 8e2225e
Change num_attention_heads and num_kv_heads to value 32
config.json CHANGED (+2 -2)
@@ -24,9 +24,9 @@
   "model_type": "falcon",
   "multi_query": true,
   "new_decoder_architecture": false,
-  "num_attention_heads":
+  "num_attention_heads": 32,
   "num_hidden_layers": 32,
-  "num_kv_heads":
+  "num_kv_heads": 32,
   "parallel_attn": true,
   "torch_dtype": "float16",
   "transformers_version": "4.35.0.dev0",
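After pulling this revision, the two updated fields can be sanity-checked by loading the config with transformers. This is a minimal sketch, not part of the commit; the local checkout path is a placeholder, and the Hub repo id would have to be substituted if loading remotely.

# Minimal verification sketch (not part of this commit).
# "./falcon-checkout" is a placeholder for a local clone of this repository;
# a Hub repo id plus revision="d4a840a" could be passed instead.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("./falcon-checkout")

assert config.model_type == "falcon"
print(config.num_attention_heads)  # 32 after this commit
print(config.num_kv_heads)         # 32 after this commit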