prince-canuma committed on
Commit
b12edf6
1 Parent(s): 8fa3eea

Upload folder using huggingface_hub (#1)

Browse files

- 761da38826a5794360b849fcc30f000e87362ea6c1192601617ab7d787748ebe (a297039386483e52743c392dbb136fb389a7e0cf)
- e05a39d5d85e41bfad5ffdd86688374801d37f53115fa0db6a5e8d102905e0b4 (f0818feb61b42d8c158b61de3f17e33178d59b81)
- 91923146ab1eb5c9b12e3a3441f5ef9ab942318612176dda08994de308f93e60 (e1163f9bb6ae6a3fccab94937e9410899842cb48)
- f4d9a00eb76ac27c2f3b867206daf5087b06bea3e13bf7f32d61a9bd166efb9f (93cda5d56426f21eb197db4cc21ae61defe5481d)
- c260ca85da949c0138dc0a8c95975eac249e56a5a741076574b23eed3250619a (00115cc431c5722f743974749cb92d029b4eb9b1)
- 5d5c4a31bb2c7b7349fd8ec31b6a94a1761b457ae5fcefe6e0f770846b5d4767 (d91d527d711d407a8dcfd19d827d5c985f9beaa6)
- ca784bc60832e2aa4eecc1bbf6f3084233e61ed1089ef52a87854275cb0a13e1 (4f4b09591656016d780abcda360dcd5c0ecf2a10)
- 9ef9a3605aa08d41bff053d209c6aab43d2f0112028ad00c4ba4ee70638c2c18 (9e600cf1c5252afb6f053fd52167b9b462e6c558)
- 135044d2f10d91f4fd11013c6ba095b9904390202d7a8f87dd16822426d5cf03 (2e5ab692a67e57419d9bc744c7c05b6d3a6cd81c)

README.md ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - en
4
+ license: other
5
+ tags:
6
+ - pretrained
7
+ - mlx
8
+ license_name: tongyi-qianwen
9
+ license_link: https://huggingface.co/Qwen/Qwen2-72B/blob/main/LICENSE
10
+ pipeline_tag: text-generation
11
+ ---
12
+
13
+ # mlx-community/Qwen2-72B-4bit
14
+
15
+ The Model [mlx-community/Qwen2-72B-4bit](https://huggingface.co/mlx-community/Qwen2-72B-4bit) was converted to MLX format from [Qwen/Qwen2-72B](https://huggingface.co/Qwen/Qwen2-72B) using mlx-lm version **0.14.2**.
16
+
17
+ ## Use with mlx
18
+
19
+ ```bash
20
+ pip install mlx-lm
21
+ ```
22
+
23
+ ```python
24
+ from mlx_lm import load, generate
25
+
26
+ model, tokenizer = load("mlx-community/Qwen2-72B-4bit")
27
+ response = generate(model, tokenizer, prompt="hello", verbose=True)
28
+ ```
added_tokens.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "<|endoftext|>": 151643,
3
+ "<|im_end|>": 151645,
4
+ "<|im_start|>": 151644
5
+ }
config.json ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "Qwen2ForCausalLM"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "bos_token_id": 151643,
7
+ "eos_token_id": 151643,
8
+ "hidden_act": "silu",
9
+ "hidden_size": 8192,
10
+ "initializer_range": 0.02,
11
+ "intermediate_size": 29568,
12
+ "max_position_embeddings": 131072,
13
+ "max_window_layers": 80,
14
+ "model_type": "qwen2",
15
+ "num_attention_heads": 64,
16
+ "num_hidden_layers": 80,
17
+ "num_key_value_heads": 8,
18
+ "quantization": {
19
+ "group_size": 64,
20
+ "bits": 4
21
+ },
22
+ "rms_norm_eps": 1e-05,
23
+ "rope_theta": 1000000.0,
24
+ "sliding_window": 131072,
25
+ "tie_word_embeddings": false,
26
+ "torch_dtype": "bfloat16",
27
+ "transformers_version": "4.40.1",
28
+ "use_cache": true,
29
+ "use_sliding_window": false,
30
+ "vocab_size": 152064
31
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f90b5abb9252f66aa11245eb907665722bfc3390e63472cca94f407da02f9fbe
3
+ size 5365567671
model-00002-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:44673eba7ef4f36da3fddee832797e8a92d9762de0287c0a91b0b10e2d824608
3
+ size 5294878244
model-00003-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f1a23008a6745ab6e1663e3a3c6021012c4387c82a10d77bc89ec04a60528d57
3
+ size 5346171127
model-00004-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f49f98a7265f01493e2cbe196e1e4d28c7b02bb9108dc5794cd4bedfa9d20d7
3
+ size 5294845211
model-00005-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:de457681fd2d635c37ca64013b9ced8bbcf70b48edc1c0483bd049640146fecc
3
+ size 5294878277
model-00006-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc807945fb0b57ce2d83ac8f444955643d1fe44fbc084dd00435f2022839d77a
3
+ size 5294878236
model-00007-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b7079d3726d331c9254016d47f4b2cbc787eaeb7c6bbf0e13425623d04af27dc
3
+ size 5346171153
model-00008-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:709bee3f048fd47bb6322f39e3d775d329b8b7c3020fb07d77135c0c9a52a652
3
+ size 3663161114
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>"
5
+ ],
6
+ "eos_token": {
7
+ "content": "<|endoftext|>",
8
+ "lstrip": false,
9
+ "normalized": false,
10
+ "rstrip": false,
11
+ "single_word": false
12
+ },
13
+ "pad_token": {
14
+ "content": "<|endoftext|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false
19
+ }
20
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "151643": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "151644": {
13
+ "content": "<|im_start|>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "151645": {
21
+ "content": "<|im_end|>",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ }
28
+ },
29
+ "additional_special_tokens": [
30
+ "<|im_start|>",
31
+ "<|im_end|>"
32
+ ],
33
+ "bos_token": null,
34
+ "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
35
+ "clean_up_tokenization_spaces": false,
36
+ "eos_token": "<|endoftext|>",
37
+ "errors": "replace",
38
+ "model_max_length": 131072,
39
+ "pad_token": "<|endoftext|>",
40
+ "split_special_tokens": false,
41
+ "tokenizer_class": "Qwen2Tokenizer",
42
+ "unk_token": null
43
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff