Dracones committed on
Commit
2109010
1 Parent(s): 0f08137

Upload folder using huggingface_hub

Browse files
README.md ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - en
4
+ license: apache-2.0
5
+ base_model: microsoft/WizardLM-2-8x22B
6
+ tags:
7
+ - exl2
8
+ ---
9
+
10
+ # WizardLM-2-8x22B - EXL2 6.0bpw
11
+
12
+ This is a 6.0bpw EXL2 quant of [microsoft/WizardLM-2-8x22B](https://huggingface.co/microsoft/WizardLM-2-8x22B)
13
+
14
+ Details about the model can be found at the above model page.
15
+
16
+ ## EXL2 Version
17
+
18
+ These quants were made with exllamav2 version 0.0.18. Quants made on this version of EXL2 may not work on older versions of the exllamav2 library.
19
+
20
+ If you have problems loading these models, please update Text Generation WebUI to the latest version.
21
+
22
+
23
+
24
+ ## Quant Details
25
+
26
+ This is the script used for quantization.
27
+
28
+ ```bash
29
#!/bin/bash
#
# Quantize a model to EXL2 format.
# First builds a calibration measurement file (if missing), then produces
# one quant directory per requested bit precision under models/.
# Requires: a conda env "exllamav2" and exllamav2's convert.py in the CWD.

# Activate the conda environment (before strict mode: conda's activation
# scripts are not `set -u`-clean on all versions)
source ~/miniconda3/etc/profile.d/conda.sh
conda activate exllamav2

# Fail fast: abort on any error, unset variable, or failed pipeline stage,
# so a failed measurement pass cannot silently feed the quantization loop.
set -euo pipefail

# Set the model name
MODEL_NAME="WizardLM-2-8x22B"

# Define variables
MODEL_DIR="/mnt/storage/models/$MODEL_NAME"       # source (fp16) weights
OUTPUT_DIR="exl2_$MODEL_NAME"                     # scratch dir for convert.py
MEASUREMENT_FILE="measurements/$MODEL_NAME.json"  # calibration measurements

# Create the measurement file if needed
if [ ! -f "$MEASUREMENT_FILE" ]; then
  echo "Creating $MEASUREMENT_FILE"
  # Start from a clean scratch directory ('--' guards odd leading chars)
  rm -rf -- "$OUTPUT_DIR"
  mkdir -p -- "$OUTPUT_DIR"

  python convert.py -i "$MODEL_DIR" -o "$OUTPUT_DIR" -nr -om "$MEASUREMENT_FILE"
fi

# Choose one of the below. Either create a single quant for testing or a batch of them.
# BIT_PRECISIONS=(2.25)
BIT_PRECISIONS=(5.0 4.5 4.0 3.5 3.0 2.75 2.5 2.25)

for BIT_PRECISION in "${BIT_PRECISIONS[@]}"; do
  CONVERTED_FOLDER="models/${MODEL_NAME}_exl2_${BIT_PRECISION}bpw"

  # If it doesn't already exist, make the quant
  if [ ! -d "$CONVERTED_FOLDER" ]; then
    echo "Creating $CONVERTED_FOLDER"

    # Fresh scratch dir per quant; -p also creates the models/ parent,
    # which plain mkdir would fail on when it doesn't exist yet
    rm -rf -- "$OUTPUT_DIR"
    mkdir -p -- "$OUTPUT_DIR"
    mkdir -p -- "$CONVERTED_FOLDER"

    # Run conversion: reuse the measurement, quantize to the target bpw
    python convert.py -i "$MODEL_DIR" -o "$OUTPUT_DIR" -nr \
      -m "$MEASUREMENT_FILE" -b "$BIT_PRECISION" -cf "$CONVERTED_FOLDER"
  fi
done
80
+ ```
config.json ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "",
3
+ "architectures": [
4
+ "MixtralForCausalLM"
5
+ ],
6
+ "attention_dropout": 0.0,
7
+ "bos_token_id": 1,
8
+ "eos_token_id": 2,
9
+ "hidden_act": "silu",
10
+ "hidden_size": 6144,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 16384,
13
+ "max_position_embeddings": 65536,
14
+ "model_type": "mixtral",
15
+ "num_attention_heads": 48,
16
+ "num_experts_per_tok": 2,
17
+ "num_hidden_layers": 56,
18
+ "num_key_value_heads": 8,
19
+ "num_local_experts": 8,
20
+ "output_router_logits": false,
21
+ "rms_norm_eps": 1e-05,
22
+ "rope_theta": 1000000,
23
+ "router_aux_loss_coef": 0.001,
24
+ "router_jitter_noise": 0.0,
25
+ "sliding_window": null,
26
+ "tie_word_embeddings": false,
27
+ "torch_dtype": "bfloat16",
28
+ "transformers_version": "4.36.2",
29
+ "use_cache": false,
30
+ "vocab_size": 32000,
31
+ "quantization_config": {
32
+ "quant_method": "exl2",
33
+ "version": "0.0.18",
34
+ "bits": 6.0,
35
+ "head_bits": 6,
36
+ "calibration": {
37
+ "rows": 100,
38
+ "length": 2048,
39
+ "dataset": "(default)"
40
+ }
41
+ }
42
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 1,
4
+ "eos_token_id": 2,
5
+ "transformers_version": "4.36.2"
6
+ }
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
output-00001-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1e6142e86d23fe8fba5c816687401c85cb43a092251345b51e8b85a3c7f5eea3
3
+ size 8588528752
output-00002-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:97ca3bbfb86766dfd29460ef234e34a2c7fec38636953d5c9dfaa407a0fe20f0
3
+ size 8571434664
output-00003-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25b9ec24ddcf528f317538eaba7054bdd95d8f323c32bcf2d7313b7eb8de5373
3
+ size 8589748144
output-00004-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f32cbc3717ec062c17e0303abe33bb6465bece26c705d33cc9bb05f9faf92028
3
+ size 8562093048
output-00005-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8902422ee7563ac5c5934f444f75578c9587af5ada5523e2ceaa29af1c7048a2
3
+ size 8545072456
output-00006-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b07856cad4eede314ba2dd6b5f0f0f43e2d104d9359c02ba61ca9a8c91b55055
3
+ size 8522328432
output-00007-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b32a567c835f303915966a797f80941f72cd25eb98bdcd2e098e1de2e14c62de
3
+ size 8560897360
output-00008-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0de601048af07ee5b8c8452e22e0adc26c3eec97bc03ec5b7daed4afdbd6ae6e
3
+ size 8589995576
output-00009-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:004a25262f4f93070a59d6d1da366bcbbfe386358590abd5cb2c259acc72afb4
3
+ size 8564813008
output-00010-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:87f62cabe3ac70ebf392633b033d459d3630db4b22438a8195d91df08a4025dc
3
+ size 8549846264
output-00011-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:15d57b1df8e7b829b6d8eecfcd285af206b103d0568719cd5d9ba866ac21f02e
3
+ size 8551482560
output-00012-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5a569d97acc3aa81658c52392d0e17f9d1b09945447f7ed80cfd193eb1a3f3b9
3
+ size 8551482560
output-00013-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:34f81a6137de5f8b4f76550097a28a703ce9e56d27a0fb2f6aeae5af8649b341
3
+ size 2973393352
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": "<unk>",
17
+ "unk_token": {
18
+ "content": "<unk>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ }
24
+ }
tokenizer.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
3
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "added_tokens_decoder": {
5
+ "0": {
6
+ "content": "<unk>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "1": {
14
+ "content": "<s>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "2": {
22
+ "content": "</s>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ }
29
+ },
30
+ "additional_special_tokens": [],
31
+ "bos_token": "<s>",
32
+ "clean_up_tokenization_spaces": false,
33
+ "eos_token": "</s>",
34
+ "legacy": true,
35
+ "model_max_length": 1000000000000000019884624838656,
36
+ "pad_token": "<unk>",
37
+ "padding_side": "right",
38
+ "sp_model_kwargs": {},
39
+ "spaces_between_special_tokens": false,
40
+ "tokenizer_class": "LlamaTokenizer",
41
+ "unk_token": "<unk>",
42
+ "use_default_system_prompt": false
43
+ }