MLX
Safetensors
qwen2
Generated from Trainer
axolotl
3thn committed on
Commit
2439caa
1 Parent(s): c7a6ca1

Upload folder using huggingface_hub (#1)

Browse files

- 5fc8064bca4ded3721fd03506cf49d3ad44f98593279e5d4bf995f55c2b5258d (61e53461d358a4bc3fc84e293beb83877441f794)
- ddd78a36a8114d68d21a7d558ff23fa8458545cd5794a3a367ad62c7cf749d04 (9be09026f312d74e31279e78eccd0e8e0c960489)
- 9ace46220958bbdbfb6229ea38b384630d8d26ec7ba92d76473cf1842ed330a1 (40a6752631b1ce61b7ea36a8158c09c4174dd4ad)
- b46e5616e11b70daf49986fc574743e77b7d6ce3fc2e3084377b9cf386d9e688 (c324e97eab0c3e0364cad6ea2d061b8bc9a45452)
- 085b901f981551e3da6518313d3eb3e2d23ffa284256958e2135a5a719726033 (007b0217e8a24a4f0b3a7b021776d6aef1e76e2f)
- 1d58afd72c8c00a04e91c47c1ae190a1ee43cbad433084aef02d8a03b804a059 (541ec3ce6bab4df391dd4d9e88bd7fcb7aa0a590)
- a9504107bc276c512bc030ae5da73c902e419c4aad5cd9f30741cab51ecc1adb (aecf849ca6259c905c144507603d4417a60370ff)
- 88ec724f96e6c4f997be5378b276196f895cb572eba37bb0d89f7198ff0d8a11 (fc75c3a23b3f38015c2ff8e21779e8581d8af02d)
- 70eea210611ac404c535429356d65fba33c6d90a5d38b2540d540afb8fe85016 (23efd2b962d3df6eccfc63ce5125809bb23cfa99)
- bf5de37edfde5125e8e6105d8a98d247ff762c614ab57c4575b802bc5b1b347e (d9a7d4daa0483f018e644621b2567f09a76f0e2d)
- e403ad39b7920cc2a0952ba28b833935f0428de9f041f78ccbf80688a4b33a7d (b680c5fd63dd315dec4b09ae70507bca2bdf526a)
- 329206252796be189d385b98b2a511f6316175d750416348bf16bc22b360a24c (2019e3cc0f419dbe43b09b0fdffc6ee8f586509d)
- eed6fb45b348a1b4148fbcfabb22c4b6ccfe482259edb465e8c99411023c0bc8 (f89b10ea2317293dc175d8ce92604e6be75ff235)

README.md ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: other
3
+ tags:
4
+ - generated_from_trainer
5
+ - axolotl
6
+ - mlx
7
+ base_model: Qwen/Qwen1.5-110B
8
+ datasets:
9
+ - cognitivecomputations/Dolphin-2.9
10
+ - teknium/OpenHermes-2.5
11
+ - m-a-p/CodeFeedback-Filtered-Instruction
12
+ - cognitivecomputations/dolphin-coder
13
+ - cognitivecomputations/samantha-data
14
+ - microsoft/orca-math-word-problems-200k
15
+ - Locutusque/function-calling-chatml
16
+ - internlm/Agent-FLAN
17
+ ---
18
+
19
+ # mlx-community/dolphin-2.9.1-qwen-110b-4bit
20
+ This model was converted to MLX format from [`cognitivecomputations/dolphin-2.9.1-qwen-110b`](https://huggingface.co/cognitivecomputations/dolphin-2.9.1-qwen-110b) using mlx-lm version **0.12.1**.
21
+ Refer to the [original model card](https://huggingface.co/cognitivecomputations/dolphin-2.9.1-qwen-110b) for more details on the model.
22
+ ## Use with mlx
23
+
24
+ ```bash
25
+ pip install mlx-lm
26
+ ```
27
+
28
+ ```python
29
+ from mlx_lm import load, generate
30
+
31
+ model, tokenizer = load("mlx-community/dolphin-2.9.1-qwen-110b-4bit")
32
+ response = generate(model, tokenizer, prompt="hello", verbose=True)
33
+ ```
added_tokens.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "<|endoftext|>": 151643,
3
+ "<|im_end|>": 151645,
4
+ "<|im_start|>": 151644
5
+ }
config.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "Qwen2ForCausalLM"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "eos_token_id": 151645,
7
+ "hidden_act": "silu",
8
+ "hidden_size": 8192,
9
+ "initializer_range": 0.02,
10
+ "intermediate_size": 49152,
11
+ "max_position_embeddings": 32768,
12
+ "max_window_layers": 28,
13
+ "model_type": "qwen2",
14
+ "num_attention_heads": 64,
15
+ "num_hidden_layers": 80,
16
+ "num_key_value_heads": 8,
17
+ "quantization": {
18
+ "group_size": 64,
19
+ "bits": 4
20
+ },
21
+ "rms_norm_eps": 1e-06,
22
+ "rope_theta": 1000000.0,
23
+ "sliding_window": 32768,
24
+ "tie_word_embeddings": false,
25
+ "torch_dtype": "bfloat16",
26
+ "transformers_version": "4.40.0.dev0",
27
+ "use_cache": false,
28
+ "use_sliding_window": false,
29
+ "vocab_size": 152064
30
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:390df73b6138b749afc72fd1a9993523121d59e5cfb84eb4650daf63dbb92a3c
3
+ size 5368282084
model-00002-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:031ded0f2b2328e901b6d134afe0a08e9fa082f160196385573956acfa65f56e
3
+ size 5355471262
model-00003-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:20be119094004496372405e4324ea02d3fa880c20d7d42418a435d6e73112e32
3
+ size 5351276819
model-00004-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965719e8f0c943db2a07171ce276c22d5757730653c2c61b008cd62a6c23f698
3
+ size 5351276851
model-00005-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aee77730f704ad8ee1447764c9f7dead8d8e3fe794a736ed42ee0a3f2cd545a2
3
+ size 5351276863
model-00006-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e8d5d887fa2b95f4046296d7ee0e621bc62ac0f60c17b231691c3c90b0f412be
3
+ size 5351276859
model-00007-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1798b8a707e77e8545e39c0dc1baa4d0819f85b255ebf1d50a32db2b5b3a7257
3
+ size 5351276843
model-00008-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:399e6a3e27c89d8b135c3c6e3a94e010a9337d9113f472e769ca2b6be6afb672
3
+ size 5351276845
model-00009-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:37a3eab35898fad7b803cafa6c7b2563c58f0ddda4eff92c51b6b12d86e96a1c
3
+ size 5351276841
model-00010-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:494851cabf89d358b5c842a3f0dd483bf6075883067dd794353f2c41e07cb97b
3
+ size 5351276849
model-00011-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ceb9264d84502c7bca05f0cab9a2cf038ba095aa8cb726f05db92014dc0e3539
3
+ size 5351276827
model-00012-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:faae0612aad67864de1a30fa3a2d190409d7b9e3cd038bf6ebb8d03b6aa97e9a
3
+ size 3673643280
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>"
5
+ ],
6
+ "eos_token": {
7
+ "content": "<|im_end|>",
8
+ "lstrip": false,
9
+ "normalized": false,
10
+ "rstrip": false,
11
+ "single_word": false
12
+ },
13
+ "pad_token": {
14
+ "content": "<|endoftext|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false
19
+ }
20
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "151643": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "151644": {
13
+ "content": "<|im_start|>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "151645": {
21
+ "content": "<|im_end|>",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ }
28
+ },
29
+ "additional_special_tokens": [
30
+ "<|im_start|>",
31
+ "<|im_end|>"
32
+ ],
33
+ "bos_token": null,
34
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
35
+ "clean_up_tokenization_spaces": false,
36
+ "eos_token": "<|im_end|>",
37
+ "errors": "replace",
38
+ "model_max_length": 32768,
39
+ "pad_token": "<|endoftext|>",
40
+ "split_special_tokens": false,
41
+ "tokenizer_class": "Qwen2Tokenizer",
42
+ "unk_token": null
43
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff