Abhinav Agarwalla committed
Commit 8a2a349 · Parent(s): 2c97ea9

Updating pruned70 weights
Files changed:
- README.md +2 -2
- config.json +2 -1
- generation_config.json +1 -1
- model-00001-of-00003.safetensors +1 -1
- model-00002-of-00003.safetensors +1 -1
- model-00003-of-00003.safetensors +1 -1
README.md
CHANGED
@@ -51,7 +51,7 @@ Model evaluation metrics and results.
 
 | Benchmark                                      | Metric        | Llama-2-7b-evolcodealpaca | Llama-2-7b-pruned70-retrained-evolcodealpaca |
 |------------------------------------------------|---------------|-------------|-------------------------------|
-| [HumanEval](https://arxiv.org/abs/2107.03374)  | pass@1        | 32.03       |
+| [HumanEval](https://arxiv.org/abs/2107.03374)  | pass@1        | 32.03       | 36.3                          |
 
 ## Model Training Details
 
@@ -61,4 +61,4 @@ The 50% sparse foundational model was finetuned for 2 epochs and then pruned to
 
 ## Help
 
-For further support, and discussions on these models and AI in general, join [Neural Magic's Slack Community](https://join.slack.com/t/discuss-neuralmagic/shared_invite/zt-q1a1cnvo-YBoICSIw3L1dmQpjBeDurQ)
+For further support, and discussions on these models and AI in general, join [Neural Magic's Slack Community](https://join.slack.com/t/discuss-neuralmagic/shared_invite/zt-q1a1cnvo-YBoICSIw3L1dmQpjBeDurQ)
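The row updated above reports HumanEval pass@1 scores. For reference, a minimal sketch of the unbiased pass@k estimator from the linked HumanEval paper (Chen et al., 2021); the sample counts n and c are illustrative inputs, not values taken from this commit.

```python
# Sketch of the unbiased pass@k estimator from the HumanEval paper
# (https://arxiv.org/abs/2107.03374). n = samples generated per problem,
# c = samples that pass the unit tests, k = evaluation budget being scored.
import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    """Probability that at least one of k samples drawn from n is correct."""
    if n - c < k:
        return 1.0
    # 1 - C(n-c, k) / C(n, k), computed in a numerically stable product form.
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

# Illustrative example: 200 samples per problem, 50 correct.
print(round(pass_at_k(200, 50, 1), 4))  # 0.25 for this hypothetical problem
```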
config.json
CHANGED
@@ -21,8 +21,9 @@
   "rope_scaling": null,
   "rope_theta": 10000.0,
   "tie_word_embeddings": false,
+  "tokenizer_class": "LlamaTokenizerFast",
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.
+  "transformers_version": "4.39.3",
   "use_cache": true,
   "vocab_size": 32000
 }
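The config now pins `"tokenizer_class": "LlamaTokenizerFast"` and records export with transformers 4.39.3. A minimal loading sketch under those settings; the repository id below is an assumption for illustration, not something stated in this commit.

```python
# Loading sketch (assumed repo id); the config change means AutoTokenizer
# resolves to LlamaTokenizerFast, and torch_dtype in config.json is bfloat16.
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "neuralmagic/Llama-2-7b-pruned70-retrained-evolcodealpaca"  # assumed

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype="auto",   # picks up bfloat16 from config.json
    device_map="auto",    # requires accelerate; place shards automatically
)

prompt = "Write a Python function that reverses a string."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```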
generation_config.json
CHANGED
@@ -2,5 +2,5 @@
   "_from_model_config": true,
   "bos_token_id": 1,
   "eos_token_id": 2,
-  "transformers_version": "4.
+  "transformers_version": "4.39.3"
 }
model-00001-of-00003.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:a7f324c1d481b1538dd9b8573ee3a39200b7b7eeb679ac5fd757e2385ee3feb3
 size 4938985352
model-00002-of-00003.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:6925642cbd5d212d15e196c382daede5143061454c9ff505de45d934aca2ca98
 size 4947390880
model-00003-of-00003.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:d59debb66c27eee4873a1a82021023898e64941334f0fe656808369b7cada8c4
 size 3590488816
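The shard entries above are Git LFS pointer files: each records the spec version, the sha256 of the actual payload, and its size in bytes. A minimal sketch, assuming the shards have already been downloaded next to the script, for checking local files against the updated oids; this workflow is illustrative and not part of the commit.

```python
# Recompute the sha256 of each downloaded shard and compare it with the oid
# recorded in the updated Git LFS pointers shown in the diffs above.
import hashlib

EXPECTED = {  # oid sha256 values from the updated pointer files
    "model-00001-of-00003.safetensors": "a7f324c1d481b1538dd9b8573ee3a39200b7b7eeb679ac5fd757e2385ee3feb3",
    "model-00002-of-00003.safetensors": "6925642cbd5d212d15e196c382daede5143061454c9ff505de45d934aca2ca98",
    "model-00003-of-00003.safetensors": "d59debb66c27eee4873a1a82021023898e64941334f0fe656808369b7cada8c4",
}

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file through sha256 so multi-GB shards use constant memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

for name, expected in EXPECTED.items():
    status = "OK" if sha256_of(name) == expected else "MISMATCH"
    print(f"{name}: {status}")
```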