Upload model
Files changed:
- README.md (+1/-1)
- adapter_config.json (+5/-5)
- adapter_model.bin (+2/-2)
README.md
CHANGED
@@ -1,6 +1,6 @@
 ---
 library_name: peft
-base_model:
+base_model: bigscience/bloomz-560m
 ---
 
 # Model Card for Model ID
adapter_config.json
CHANGED
@@ -1,15 +1,15 @@
 {
   "auto_mapping": null,
-  "base_model_name_or_path": "
-  "encoder_hidden_size":
+  "base_model_name_or_path": "bigscience/bloomz-560m",
+  "encoder_hidden_size": 1024,
   "inference_mode": true,
-  "num_attention_heads":
-  "num_layers":
+  "num_attention_heads": 16,
+  "num_layers": 24,
   "num_transformer_submodules": 1,
   "num_virtual_tokens": 30,
   "peft_type": "PREFIX_TUNING",
   "prefix_projection": false,
   "revision": null,
   "task_type": "CAUSAL_LM",
-  "token_dim":
+  "token_dim": 1024
 }
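The updated adapter_config.json describes a prefix-tuning adapter for bigscience/bloomz-560m. Below is a minimal sketch (not taken from this repo) of how an adapter with this configuration could be created with the peft library; the training loop and the output directory name are assumptions, and the dimension fields (token_dim, num_attention_heads, num_layers) are normally inferred from the base model rather than set by hand.

```python
# Hedged sketch: recreating a PREFIX_TUNING adapter matching the uploaded config.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PrefixTuningConfig, TaskType, get_peft_model

base_id = "bigscience/bloomz-560m"
model = AutoModelForCausalLM.from_pretrained(base_id)
tokenizer = AutoTokenizer.from_pretrained(base_id)

# num_virtual_tokens and prefix_projection match adapter_config.json;
# token_dim=1024, num_attention_heads=16, num_layers=24 are filled in
# automatically from the bloomz-560m model config by get_peft_model.
peft_config = PrefixTuningConfig(
    task_type=TaskType.CAUSAL_LM,
    num_virtual_tokens=30,
    prefix_projection=False,
)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()

# ... training loop omitted ...
# Saving writes adapter_config.json and adapter_model.bin, the files in this commit:
# model.save_pretrained("bloomz-560m-prefix-tuning")  # hypothetical output dir
```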
adapter_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:236606f3b27acd50d2094bb3952148d3e034b55d0690c463201dcc7f277c4234
+size 5899514
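adapter_model.bin is stored as a Git LFS pointer; the actual weights (~5.9 MB) are the prefix-tuning parameters only. A minimal sketch of loading this adapter on top of the base model for inference follows; the adapter repo id is a placeholder since the commit view does not show it, and the prompt is only illustrative.

```python
# Hedged sketch: loading the uploaded prefix-tuning adapter for inference.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "bigscience/bloomz-560m"
adapter_id = "<user>/<repo>"  # hypothetical repo id for this adapter

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(model, adapter_id)  # downloads adapter_config.json + adapter_model.bin
model.eval()

inputs = tokenizer("Translate to English: Je t'aime.", return_tensors="pt")
with torch.no_grad():
    outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```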