prince-canuma committed
Commit fb4021f (1 parent: 25466a4)

Upload folder using huggingface_hub

README.md CHANGED
@@ -179,10 +179,7 @@ extra_gated_button_content: Submit
 ---
 
 # mlx-community/Meta-Llama-3-70B-4bit
-This model was converted to MLX format from [`meta-llama/Meta-Llama-3-70B`]() using mlx-lm version **0.9.0**.
-
-Model added by [Prince Canuma](https://twitter.com/Prince_Canuma).
-
+This model was converted to MLX format from [`meta-llama/Meta-Llama-3-70B`]() using mlx-lm version **0.10.0**.
 Refer to the [original model card](https://huggingface.co/meta-llama/Meta-Llama-3-70B) for more details on the model.
 ## Use with mlx
 
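The substantive change here is the recorded mlx-lm version moving from **0.9.0** to **0.10.0** (the "Model added by" credit line is dropped). For reference, a minimal sketch of the usage the "Use with mlx" section covers, assuming the standard `mlx_lm` Python API; the repo id comes from the heading and the prompt is illustrative:

```python
# pip install mlx-lm
from mlx_lm import load, generate

# Fetch the 4-bit MLX weights from the Hub and load model + tokenizer
model, tokenizer = load("mlx-community/Meta-Llama-3-70B-4bit")

# Base model, so plain text completion (no chat template is applied)
response = generate(model, tokenizer, prompt="The capital of France is", verbose=True)
```
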
config.json CHANGED
@@ -1,85 +1,31 @@
 {
-  "add_cross_attention": false,
   "architectures": [
     "LlamaForCausalLM"
   ],
   "attention_bias": false,
   "attention_dropout": 0.0,
-  "bad_words_ids": null,
-  "begin_suppress_tokens": null,
   "bos_token_id": 128000,
-  "chunk_size_feed_forward": 0,
-  "cross_attention_hidden_size": null,
-  "decoder_start_token_id": null,
-  "diversity_penalty": 0.0,
-  "do_sample": false,
-  "early_stopping": false,
-  "encoder_no_repeat_ngram_size": 0,
   "eos_token_id": 128001,
-  "exponential_decay_length_penalty": null,
-  "finetuning_task": null,
-  "forced_bos_token_id": null,
-  "forced_eos_token_id": null,
   "hidden_act": "silu",
   "hidden_size": 8192,
-  "id2label": {
-    "0": "LABEL_0",
-    "1": "LABEL_1"
-  },
   "initializer_range": 0.02,
   "intermediate_size": 28672,
-  "is_decoder": false,
-  "is_encoder_decoder": false,
-  "label2id": {
-    "LABEL_0": 0,
-    "LABEL_1": 1
-  },
-  "length_penalty": 1.0,
-  "max_length": 20,
   "max_position_embeddings": 8192,
-  "min_length": 0,
   "model_type": "llama",
-  "no_repeat_ngram_size": 0,
   "num_attention_heads": 64,
-  "num_beam_groups": 1,
-  "num_beams": 1,
   "num_hidden_layers": 80,
   "num_key_value_heads": 8,
-  "num_return_sequences": 1,
-  "output_attentions": false,
-  "output_hidden_states": false,
-  "output_scores": false,
-  "pad_token_id": null,
-  "prefix": null,
   "pretraining_tp": 1,
-  "problem_type": null,
-  "pruned_heads": {},
   "quantization": {
     "group_size": 64,
     "bits": 4
   },
-  "remove_invalid_values": false,
-  "repetition_penalty": 1.0,
-  "return_dict": true,
-  "return_dict_in_generate": false,
   "rms_norm_eps": 1e-05,
   "rope_scaling": null,
   "rope_theta": 500000.0,
-  "sep_token_id": null,
-  "suppress_tokens": null,
-  "task_specific_params": null,
-  "temperature": 1.0,
-  "tf_legacy_loss": false,
-  "tie_encoder_decoder": false,
   "tie_word_embeddings": false,
-  "tokenizer_class": null,
-  "top_k": 50,
-  "top_p": 1.0,
   "torch_dtype": "bfloat16",
-  "torchscript": false,
-  "transformers_version": "4.40.0",
-  "typical_p": 1.0,
-  "use_bfloat16": false,
+  "transformers_version": "4.40.0.dev0",
   "use_cache": true,
   "vocab_size": 128256
 }
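
The rewritten config drops the generation/classification defaults that older `transformers` versions serialized into every config (`max_length`, `id2label`, `top_k`, ...) and keeps only the Llama architecture keys plus the MLX `quantization` block. A sketch of the conversion that produces such a config, assuming mlx-lm's `convert` API; the keyword names follow its documented defaults and should be checked against your installed version:

```python
from mlx_lm import convert

# Quantize the bfloat16 checkpoint to 4 bits with group size 64,
# matching the "quantization": {"group_size": 64, "bits": 4} block above.
convert(
    "meta-llama/Meta-Llama-3-70B",
    mlx_path="Meta-Llama-3-70B-4bit",
    quantize=True,
    q_group_size=64,
    q_bits=4,
)
```
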
model-00001-of-00008.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4c371e2744853b0c80b59cc12fa7e100834b46588022240e1d40cbda2ff48146
-size 5338514834
+oid sha256:dab467ad382aa2f59271f2fa6774ce2a697e275e62745739b299a01e97c0c382
+size 5272167770
model-00002-of-00008.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ecb11d77bde98dacf774dd82f3debd654de0967a57565e98b31629e1fb700cf9
-size 5294649665
+oid sha256:faa98efc51b0c67f028c0c758af9694da56db1e503a98b85989117febe1c184a
+size 5294649694
model-00003-of-00008.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d644b5f444beef852f94e7898971e9d9e3523db9f9a1e7978280ba62ad55143e
-size 5294649735
+oid sha256:5c450d9ebb6a46425aafb691c0db687b10c735ca8752d45688b18a0b297ecbb2
+size 5294649717
model-00004-of-00008.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:39bae497ebf814175f2641b05640499e71f060ce06e13f1c99b76f3632aec945
-size 5294649737
+oid sha256:ce8219070204d955d67799a38e07e5943d2567ef0ab6ac3a4b83229117562c65
+size 5294649733
model-00005-of-00008.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:560201402eaa5f8a211f7fc7d0e3d6778213a24cbb00947a8dd03345bf98ef35
-size 5294649745
+oid sha256:9efa67446580116f8b42bbb37a9a6e85cccd9639279f4c18f8b17f36509ffc40
+size 5294649717
model-00006-of-00008.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:864b0e58e26962327194c0640e58dcfffb2a8a96d72c960c55750a8bc878b515
-size 5294649715
+oid sha256:30ab52f010017b3f54e306b44910ab4db12d00f67e7360cfb6f863c62f0a4781
+size 5294649733
model-00007-of-00008.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0dd6abfbe77fc095c8cc9378458e425c2bbfef375244db679eb96bf9c391a4fb
-size 5294649715
+oid sha256:ecb9177c13ed9d1a4dff4cfc45029ee5691772d1f93c9ee9307110e105f25fb9
+size 5294649739
model-00008-of-00008.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:eb66e20a4a03e187398f2afe490fe24888580b96a2cb5ebf2e92b5f4f83518bd
-size 4092496847
+oid sha256:97ad02e3eb5ab4354de4ad61a0ece132195fdfefcdb7c11ee7afc76378a26a75
+size 2648501502
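
Each entry above is a Git LFS pointer, so the diff only changes the object id and byte size of each shard. The eight new sizes sum to about 39.69 GB (down from about 41.20 GB before the commit), with the final shard shrinking from ~4.09 GB to ~2.65 GB. A sketch that re-derives the total from the Hub using `huggingface_hub`'s `model_info`; the repo id is assumed from the README:

```python
from huggingface_hub import HfApi

# files_metadata=True populates per-file sizes in the repo file listing
info = HfApi().model_info("mlx-community/Meta-Llama-3-70B-4bit", files_metadata=True)
shards = [s for s in info.siblings if s.rfilename.endswith(".safetensors")]
print(f"{len(shards)} shards, {sum(s.size for s in shards) / 1e9:.2f} GB total")
```
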
model.safetensors.index.json CHANGED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json CHANGED
@@ -7,7 +7,7 @@
     "single_word": false
   },
   "eos_token": {
-    "content": "<|eot_id|>",
+    "content": "<|end_of_text|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
tokenizer.json CHANGED
@@ -2348,6 +2348,7 @@
     "end_of_word_suffix": null,
     "fuse_unk": false,
     "byte_fallback": false,
+    "ignore_merges": false,
     "vocab": {
       "!": 0,
       "\"": 1,
tokenizer_config.json CHANGED
@@ -2050,9 +2050,8 @@
     }
   },
   "bos_token": "<|begin_of_text|>",
-  "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}",
   "clean_up_tokenization_spaces": true,
-  "eos_token": "<|eot_id|>",
+  "eos_token": "<|end_of_text|>",
   "model_input_names": [
     "input_ids",
     "attention_mask"