Upload folder using huggingface_hub
#3
by lrl-modelcloud · opened
- config.json +5 -3
- model-00001-of-00006.safetensors +3 -0
- model-00002-of-00006.safetensors +3 -0
- model-00003-of-00006.safetensors +3 -0
- model-00004-of-00006.safetensors +3 -0
- model-00005-of-00006.safetensors +3 -0
- model-00006-of-00006.safetensors +3 -0
- model.safetensors.index.json +0 -0
- quantize_config.json +4 -2
config.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "_attn_implementation_autoset": true,
-  "_name_or_path": "/monster/data/
+  "_name_or_path": "/monster/data/lrl/qwen2.5_coder_32b_quant/Q_upload/",
   "architectures": [
     "Qwen2ForCausalLM"
   ],
@@ -20,14 +20,16 @@
   "quantization_config": {
     "bits": 4,
     "checkpoint_format": "gptq",
+    "damp_auto_increment": 0.0015,
+    "damp_percent": 0.005,
     "desc_act": true,
     "dynamic": null,
     "group_size": 32,
     "lm_head": false,
     "meta": {
       "damp_auto_increment": 0.0015,
-      "damp_percent": 0.
-      "quantizer": "gptqmodel:1.2.
+      "damp_percent": 0.005,
+      "quantizer": "gptqmodel:1.2.0-dev",
       "uri": "https://github.com/modelcloud/gptqmodel"
     },
     "quant_method": "gptq",
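The net change to config.json is that damp_percent and damp_auto_increment are now recorded directly under quantization_config (not only under meta), and the quantizer version is pinned to gptqmodel:1.2.0-dev. For reference, a minimal sketch of loading the resulting 4-bit GPTQ checkpoint with transformers follows; the repository id below is a placeholder (not stated in this commit), and a GPTQ-capable backend (e.g. the gptqmodel/optimum integration) is assumed to be installed.

# Minimal sketch: load the 4-bit GPTQ checkpoint described by this config.
# "ModelCloud/placeholder-repo" is NOT the real repo id; substitute the actual one.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "ModelCloud/placeholder-repo"  # placeholder, not confirmed by this commit

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    device_map="auto",  # spreads the six safetensors shards across available GPUs
)

prompt = "def quicksort(arr):"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=64)[0]))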
model-00001-of-00006.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3c26d93cd0d4ccce97c738c7b5ba01c90722300954863f55753ab6c8d17dfa6
+size 3932734920

model-00002-of-00006.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8042372a5f9540504a4b2dec4a04023148f3f48b9d32e4e6043cc139ae2e3e39
+size 3950222264

model-00003-of-00006.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad2aff54876644a0f3467754d84af25829c37ba4c093fe84485a7c6749dd1ac2
+size 3950222320

model-00004-of-00006.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3e12abfa7617c2678b67cafd9c4e4a2f2c6c46547d4dc8cea1a0c51c67ba35e
+size 3950222320

model-00005-of-00006.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5dce89b1734b0f38803a0cac1e456e3ccff7bfdc544caa995266f616ffe84a77
+size 3831902736

model-00006-of-00006.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ee0a67819e99d588a47b9c51ce3a3b3b819d7b007f8acfe8c1179b1db7a55be
+size 1557135488
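Each of the six shards above is committed as a Git LFS pointer (a version line, a sha256 oid, and a byte size) rather than the tensor data itself; the Hub serves the actual weights from LFS storage at download time. Below is a small stdlib-only sketch that parses such pointer files and totals the shard sizes; it only applies to the pointer text (e.g. in a clone made with GIT_LFS_SKIP_SMUDGE=1), since a normal download replaces the pointers with the real weight files.

# Sketch: parse Git LFS pointer files like the ones above and total the shard sizes.
from pathlib import Path

def parse_lfs_pointer(path: Path) -> dict:
    # Pointer files are plain "key value" lines: version, oid sha256:<hex>, size <bytes>.
    fields = {}
    for line in path.read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointers = sorted(Path(".").glob("model-*-of-00006.safetensors"))
total_bytes = sum(int(parse_lfs_pointer(p)["size"]) for p in pointers)
print(f"{len(pointers)} shards, {total_bytes / 1e9:.2f} GB")
# The six sizes listed in this commit sum to 21,172,440,048 bytes (~21.2 GB).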
model.safetensors.index.json
ADDED
The diff for this file is too large to render; see the raw diff.
quantize_config.json
CHANGED
@@ -6,13 +6,15 @@
   "static_groups": false,
   "sym": true,
   "lm_head": false,
+  "damp_percent": 0.005,
+  "damp_auto_increment": 0.0015,
   "true_sequential": true,
   "quant_method": "gptq",
   "checkpoint_format": "gptq",
   "meta": {
-    "quantizer": "gptqmodel:1.2.
+    "quantizer": "gptqmodel:1.2.0-dev",
     "uri": "https://github.com/modelcloud/gptqmodel",
-    "damp_percent": 0.
+    "damp_percent": 0.005,
     "damp_auto_increment": 0.0015
   }
 }
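quantize_config.json gets the same treatment: damp_percent=0.005 and damp_auto_increment=0.0015 now appear as top-level keys in addition to the meta block, and the quantizer is pinned to gptqmodel:1.2.0-dev. A rough sketch of how these settings would be expressed when quantizing with GPTQModel follows; the QuantizeConfig field names are assumed to mirror the JSON keys, and the base-model id and calibration data are purely illustrative, so check the installed gptqmodel release for the exact API.

# Rough sketch only: field names assumed to mirror quantize_config.json; verify
# against the gptqmodel version you actually have installed.
from gptqmodel import GPTQModel, QuantizeConfig

quant_config = QuantizeConfig(
    bits=4,
    group_size=32,
    desc_act=True,
    sym=True,
    true_sequential=True,
    static_groups=False,
    lm_head=False,
    damp_percent=0.005,          # value recorded by this commit
    damp_auto_increment=0.0015,  # step used when damping has to be raised during GPTQ
)

# Illustrative usage (base-model id and calibration set are assumptions, not from this commit):
# model = GPTQModel.load("Qwen/Qwen2.5-Coder-32B-Instruct", quant_config)
# model.quantize(calibration_dataset)
# model.save("/monster/data/lrl/qwen2.5_coder_32b_quant/Q_upload/")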