jtatman committed on
Commit
8fa921b
1 Parent(s): a7b34e5

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,88 @@
+ ---
+ license: apache-2.0
+ tags:
+ - moe
+ - frankenmoe
+ - merge
+ - mergekit
+ - lazymergekit
+ - TinyLlama/TinyLlama-1.1B-intermediate-step-715k-1.5T
+ - cognitivecomputations/TinyDolphin-2.8.2-1.1b-laser
+ - cognitivecomputations/TinyDolphin-2.8.1-1.1b
+ - TinyLlama/TinyLlama-1.1B-Chat-v1.0
+ base_model:
+ - TinyLlama/TinyLlama-1.1B-intermediate-step-715k-1.5T
+ - cognitivecomputations/TinyDolphin-2.8.2-1.1b-laser
+ - cognitivecomputations/TinyDolphin-2.8.1-1.1b
+ - TinyLlama/TinyLlama-1.1B-Chat-v1.0
+ ---
+
+ # Tiny-Llama-Llama-Dolphin-laser-1b-moe
+
+ Tiny-Llama-Llama-Dolphin-laser-1b-moe is a Mixture of Experts (MoE) model built from the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):
+ * [TinyLlama/TinyLlama-1.1B-intermediate-step-715k-1.5T](https://huggingface.co/TinyLlama/TinyLlama-1.1B-intermediate-step-715k-1.5T)
+ * [cognitivecomputations/TinyDolphin-2.8.2-1.1b-laser](https://huggingface.co/cognitivecomputations/TinyDolphin-2.8.2-1.1b-laser)
+ * [cognitivecomputations/TinyDolphin-2.8.1-1.1b](https://huggingface.co/cognitivecomputations/TinyDolphin-2.8.1-1.1b)
+ * [TinyLlama/TinyLlama-1.1B-Chat-v1.0](https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0)
+
+ ## 🧩 Configuration
+
+ ```yaml
+ base_model: cognitivecomputations/TinyDolphin-2.8.2-1.1b-laser
+ experts:
+   - source_model: TinyLlama/TinyLlama-1.1B-intermediate-step-715k-1.5T
+     positive_prompts:
+       - "Write a Python script that sorts a list of integers using the bubble sort algorithm."
+       - "Write a JavaScript function that redirects a web page to another page after 5 seconds."
+     negative_prompts:
+       - "Discuss the latest world events."
+       - "Narrate a fictional story about a knight's quest."
+   - source_model: cognitivecomputations/TinyDolphin-2.8.2-1.1b-laser
+     positive_prompts:
+       - "Describe the steps to troubleshoot a fluid dynamics issue with a water fountain."
+       - "If we have 3 marbles, and two roll under the counter, and one is found, how many marbles are there?"
+     negative_prompts:
+       - "Tell me about your favorite book."
+       - "Write a Python script that sorts a list of integers."
+   - source_model: cognitivecomputations/TinyDolphin-2.8.1-1.1b
+     positive_prompts:
+       - "Write a short story about a knight's quest to find a lost treasure, and then summarize it in one paragraph."
+       - "Summarize the following article with details and clarity."
+     negative_prompts:
+       - "Give me a sample of code in Rust."
+       - "Describe the steps to troubleshoot a fluid dynamics issue."
+   - source_model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
+     positive_prompts:
+       - "Tell me about your favorite book and why you like it."
+       - "Chat with me about something I've been thinking of."
+     negative_prompts:
+       - "Write a Python script that sorts a list of integers."
+       - "Summarize the following article with details and clarity."
+ gate_mode: hidden
+ ```
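+
+ To reproduce this merge locally, a config like the one above can be handed to mergekit's MoE entry point. A minimal sketch in notebook style (it assumes the YAML is saved as `config.yaml` and writes the merged model to `merge/`; exact flags vary across mergekit versions):
+
+ ```python
+ # Sketch: install mergekit, then build the MoE from config.yaml into ./merge
+ !pip install -qU mergekit
+ !mergekit-moe config.yaml merge
+ ```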
+
+ ## 💻 Usage
+
+ ```python
+ !pip install -qU transformers bitsandbytes accelerate
+
+ from transformers import AutoTokenizer
+ import transformers
+ import torch
+
+ model = "jtatman/Tiny-Llama-Llama-Dolphin-laser-1b-moe"
+
+ # Load the tokenizer and a 4-bit quantized text-generation pipeline.
+ tokenizer = AutoTokenizer.from_pretrained(model)
+ pipeline = transformers.pipeline(
+     "text-generation",
+     model=model,
+     model_kwargs={"torch_dtype": torch.float16, "load_in_4bit": True},
+ )
+
+ # Build a ChatML-style prompt from the chat template and generate.
+ messages = [{"role": "user", "content": "Explain what a Mixture of Experts is in less than 100 words."}]
+ prompt = pipeline.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+ outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
+ print(outputs[0]["generated_text"])
+ ```
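+
+ Note that `load_in_4bit=True` depends on `bitsandbytes` and a CUDA GPU; on a CPU-only machine, drop that kwarg and load the model at full precision instead.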
added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "<|im_end|>": 32000,
+   "<|im_start|>": 32001
+ }
config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_name_or_path": "cognitivecomputations/TinyDolphin-2.8.2-1.1b-laser",
+   "architectures": [
+     "MixtralForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 32000,
+   "hidden_act": "silu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 5632,
+   "max_position_embeddings": 4096,
+   "model_type": "mixtral",
+   "num_attention_heads": 32,
+   "num_experts_per_tok": 2,
+   "num_hidden_layers": 22,
+   "num_key_value_heads": 4,
+   "num_local_experts": 4,
+   "output_router_logits": false,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "router_aux_loss_coef": 0.001,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.38.1",
+   "use_cache": true,
+   "vocab_size": 32002
+ }
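The config registers the merge as a `MixtralForCausalLM` with four local experts, two of which are routed per token. A quick sanity check after download (a sketch, assuming `transformers` is installed):

```python
from transformers import AutoConfig

# Fetches only config.json, not the weights.
cfg = AutoConfig.from_pretrained("jtatman/Tiny-Llama-Llama-Dolphin-laser-1b-moe")
print(cfg.model_type)           # mixtral
print(cfg.num_local_experts)    # 4
print(cfg.num_experts_per_tok)  # 2
```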
mergekit_config.yml ADDED
@@ -0,0 +1,17 @@
+ models:
+   - model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
+     parameters:
+       weight: 1.0
+   - model: cognitivecomputations/TinyDolphin-2.8.2-1.1b-laser
+     parameters:
+       weight: 1.0
+   - model: cognitivecomputations/TinyDolphin-2.8.1-1.1b
+     parameters:
+       weight: 0.4
+   - model: TinyLlama/TinyLlama-1.1B-intermediate-step-715k-1.5T
+     parameters:
+       weight: 0.6
+ merge_method: linear
+ dtype: float16
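This config describes a plain linear merge: each parameter of the output is a weighted average of the corresponding tensors from the source models, with the weights normalized to sum to one (mergekit's default). A toy sketch of the idea, not mergekit's actual implementation:

```python
import torch

# Per-model weights from the config above.
weights = [1.0, 1.0, 0.4, 0.6]
# Stand-ins for one parameter tensor shared across the four source models.
tensors = [torch.randn(4, 4) for _ in weights]

total = sum(weights)
merged = sum((w / total) * t for w, t in zip(weights, tensors))
```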
mergekit_moe_config.yml ADDED
@@ -0,0 +1,34 @@
+ base_model: cognitivecomputations/TinyDolphin-2.8.2-1.1b-laser
+ experts:
+   - source_model: TinyLlama/TinyLlama-1.1B-intermediate-step-715k-1.5T
+     positive_prompts:
+       - "Write a Python script that sorts a list of integers using the bubble sort algorithm."
+       - "Write a JavaScript function that redirects a web page to another page after 5 seconds."
+     negative_prompts:
+       - "Discuss the latest world events."
+       - "Narrate a fictional story about a knight's quest."
+   - source_model: cognitivecomputations/TinyDolphin-2.8.2-1.1b-laser
+     positive_prompts:
+       - "Describe the steps to troubleshoot a fluid dynamics issue with a water fountain."
+       - "If we have 3 marbles, and two roll under the counter, and one is found, how many marbles are there?"
+     negative_prompts:
+       - "Tell me about your favorite book."
+       - "Write a Python script that sorts a list of integers."
+   - source_model: cognitivecomputations/TinyDolphin-2.8.1-1.1b
+     positive_prompts:
+       - "Write a short story about a knight's quest to find a lost treasure, and then summarize it in one paragraph."
+       - "Summarize the following article with details and clarity."
+     negative_prompts:
+       - "Give me a sample of code in Rust."
+       - "Describe the steps to troubleshoot a fluid dynamics issue."
+   - source_model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
+     positive_prompts:
+       - "Tell me about your favorite book and why you like it."
+       - "Chat with me about something I've been thinking of."
+     negative_prompts:
+       - "Write a Python script that sorts a list of integers."
+       - "Summarize the following article with details and clarity."
+ gate_mode: hidden
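For reference, `gate_mode: hidden` initializes each layer's router from hidden-state representations of the positive and negative prompts, so the prompts above steer which expert each kind of request is routed to at inference time.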
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6f19c784a94b32715afcfd6518b849f421707d79dc9eaaba831a38de3cd7589
+ size 1980957928
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aea232972c2eb28d084378eabc326eb71bc45fc167d7d50f464902515ba6bfd4
+ size 1990280400
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d8acde4a95ad9c6a796cc4fddd1590953abc299d6a564544985ce4fa2da0c0f
+ size 219161720
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa708c185f0ca1287bbc2a3573969c98d1a744f80f99d32c6f45b0d52fb75f42
+ size 1977686920
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73800092a14f9c7192c27e13dbb190fb89cbeae0a54ec65a93f1f280aa8955b6
+ size 1977687000
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79b126414c8d1bdf9ff37065d28d7ccf3432a2026a99b47155e29d912ab41ec9
+ size 822468720
model.safetensors.index.json ADDED
@@ -0,0 +1 @@
+ {"metadata": {"mergekit_version": "0.0.4.1"}, "weight_map": {"model.embed_tokens.weight": "model-00001-of-00004.safetensors", "model.norm.weight": "model-00001-of-00004.safetensors", "lm_head.weight": "model-00001-of-00004.safetensors", "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.0.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00004.safetensors", "model.layers.0.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00004.safetensors", "model.layers.0.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00004.safetensors", "model.layers.0.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00004.safetensors", "model.layers.0.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00004.safetensors", "model.layers.0.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00004.safetensors", "model.layers.0.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00004.safetensors", "model.layers.0.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00004.safetensors", "model.layers.0.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00004.safetensors", "model.layers.0.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00004.safetensors", "model.layers.0.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00004.safetensors", "model.layers.0.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00004.safetensors", "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.1.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00004.safetensors", "model.layers.1.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00004.safetensors", "model.layers.1.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00004.safetensors", "model.layers.1.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00004.safetensors", "model.layers.1.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00004.safetensors", "model.layers.1.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00004.safetensors", "model.layers.1.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00004.safetensors", "model.layers.1.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00004.safetensors", "model.layers.1.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00004.safetensors", "model.layers.1.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00004.safetensors", "model.layers.1.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00004.safetensors", "model.layers.1.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00004.safetensors", "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", 
"model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.2.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00004.safetensors", "model.layers.2.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00004.safetensors", "model.layers.2.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00004.safetensors", "model.layers.2.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00004.safetensors", "model.layers.2.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00004.safetensors", "model.layers.2.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00004.safetensors", "model.layers.2.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00004.safetensors", "model.layers.2.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00004.safetensors", "model.layers.2.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00004.safetensors", "model.layers.2.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00004.safetensors", "model.layers.2.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00004.safetensors", "model.layers.2.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00004.safetensors", "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.3.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00004.safetensors", "model.layers.3.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00004.safetensors", "model.layers.3.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00004.safetensors", "model.layers.3.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00004.safetensors", "model.layers.3.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00004.safetensors", "model.layers.3.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00004.safetensors", "model.layers.3.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00004.safetensors", "model.layers.3.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00004.safetensors", "model.layers.3.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00004.safetensors", "model.layers.3.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00004.safetensors", "model.layers.3.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00004.safetensors", "model.layers.3.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00004.safetensors", "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.4.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00004.safetensors", 
"model.layers.4.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00004.safetensors", "model.layers.4.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00004.safetensors", "model.layers.4.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00004.safetensors", "model.layers.4.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00004.safetensors", "model.layers.4.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00004.safetensors", "model.layers.4.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00004.safetensors", "model.layers.4.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00004.safetensors", "model.layers.4.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00004.safetensors", "model.layers.4.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00004.safetensors", "model.layers.4.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00004.safetensors", "model.layers.4.block_sparse_moe.experts.3.w2.weight": "model-00001-of-00004.safetensors", "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.5.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00004.safetensors", "model.layers.5.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00004.safetensors", "model.layers.5.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00004.safetensors", "model.layers.5.block_sparse_moe.experts.3.w3.weight": "model-00001-of-00004.safetensors", "model.layers.5.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00004.safetensors", "model.layers.5.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00004.safetensors", "model.layers.5.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00004.safetensors", "model.layers.5.block_sparse_moe.experts.3.w1.weight": "model-00001-of-00004.safetensors", "model.layers.5.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00004.safetensors", "model.layers.5.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00004.safetensors", "model.layers.5.block_sparse_moe.experts.2.w2.weight": "model-00002-of-00004.safetensors", "model.layers.5.block_sparse_moe.experts.3.w2.weight": "model-00002-of-00004.safetensors", "model.layers.6.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.6.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.6.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.6.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.6.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.6.block_sparse_moe.experts.0.w3.weight": "model-00002-of-00004.safetensors", "model.layers.6.block_sparse_moe.experts.1.w3.weight": "model-00002-of-00004.safetensors", "model.layers.6.block_sparse_moe.experts.2.w3.weight": "model-00002-of-00004.safetensors", "model.layers.6.block_sparse_moe.experts.3.w3.weight": "model-00002-of-00004.safetensors", "model.layers.6.block_sparse_moe.experts.0.w1.weight": "model-00002-of-00004.safetensors", "model.layers.6.block_sparse_moe.experts.1.w1.weight": 
"model-00002-of-00004.safetensors", "model.layers.6.block_sparse_moe.experts.2.w1.weight": "model-00002-of-00004.safetensors", "model.layers.6.block_sparse_moe.experts.3.w1.weight": "model-00002-of-00004.safetensors", "model.layers.6.block_sparse_moe.experts.0.w2.weight": "model-00002-of-00004.safetensors", "model.layers.6.block_sparse_moe.experts.1.w2.weight": "model-00002-of-00004.safetensors", "model.layers.6.block_sparse_moe.experts.2.w2.weight": "model-00002-of-00004.safetensors", "model.layers.6.block_sparse_moe.experts.3.w2.weight": "model-00002-of-00004.safetensors", "model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.7.block_sparse_moe.experts.0.w3.weight": "model-00002-of-00004.safetensors", "model.layers.7.block_sparse_moe.experts.1.w3.weight": "model-00002-of-00004.safetensors", "model.layers.7.block_sparse_moe.experts.2.w3.weight": "model-00002-of-00004.safetensors", "model.layers.7.block_sparse_moe.experts.3.w3.weight": "model-00002-of-00004.safetensors", "model.layers.7.block_sparse_moe.experts.0.w1.weight": "model-00002-of-00004.safetensors", "model.layers.7.block_sparse_moe.experts.1.w1.weight": "model-00002-of-00004.safetensors", "model.layers.7.block_sparse_moe.experts.2.w1.weight": "model-00002-of-00004.safetensors", "model.layers.7.block_sparse_moe.experts.3.w1.weight": "model-00002-of-00004.safetensors", "model.layers.7.block_sparse_moe.experts.0.w2.weight": "model-00002-of-00004.safetensors", "model.layers.7.block_sparse_moe.experts.1.w2.weight": "model-00002-of-00004.safetensors", "model.layers.7.block_sparse_moe.experts.2.w2.weight": "model-00002-of-00004.safetensors", "model.layers.7.block_sparse_moe.experts.3.w2.weight": "model-00002-of-00004.safetensors", "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.8.block_sparse_moe.experts.0.w3.weight": "model-00002-of-00004.safetensors", "model.layers.8.block_sparse_moe.experts.1.w3.weight": "model-00002-of-00004.safetensors", "model.layers.8.block_sparse_moe.experts.2.w3.weight": "model-00002-of-00004.safetensors", "model.layers.8.block_sparse_moe.experts.3.w3.weight": "model-00002-of-00004.safetensors", "model.layers.8.block_sparse_moe.experts.0.w1.weight": "model-00002-of-00004.safetensors", "model.layers.8.block_sparse_moe.experts.1.w1.weight": "model-00002-of-00004.safetensors", "model.layers.8.block_sparse_moe.experts.2.w1.weight": "model-00002-of-00004.safetensors", "model.layers.8.block_sparse_moe.experts.3.w1.weight": "model-00002-of-00004.safetensors", "model.layers.8.block_sparse_moe.experts.0.w2.weight": "model-00002-of-00004.safetensors", "model.layers.8.block_sparse_moe.experts.1.w2.weight": "model-00002-of-00004.safetensors", 
"model.layers.8.block_sparse_moe.experts.2.w2.weight": "model-00002-of-00004.safetensors", "model.layers.8.block_sparse_moe.experts.3.w2.weight": "model-00002-of-00004.safetensors", "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.9.block_sparse_moe.experts.0.w3.weight": "model-00002-of-00004.safetensors", "model.layers.9.block_sparse_moe.experts.1.w3.weight": "model-00002-of-00004.safetensors", "model.layers.9.block_sparse_moe.experts.2.w3.weight": "model-00002-of-00004.safetensors", "model.layers.9.block_sparse_moe.experts.3.w3.weight": "model-00002-of-00004.safetensors", "model.layers.9.block_sparse_moe.experts.0.w1.weight": "model-00002-of-00004.safetensors", "model.layers.9.block_sparse_moe.experts.1.w1.weight": "model-00002-of-00004.safetensors", "model.layers.9.block_sparse_moe.experts.2.w1.weight": "model-00002-of-00004.safetensors", "model.layers.9.block_sparse_moe.experts.3.w1.weight": "model-00002-of-00004.safetensors", "model.layers.9.block_sparse_moe.experts.0.w2.weight": "model-00002-of-00004.safetensors", "model.layers.9.block_sparse_moe.experts.1.w2.weight": "model-00002-of-00004.safetensors", "model.layers.9.block_sparse_moe.experts.2.w2.weight": "model-00002-of-00004.safetensors", "model.layers.9.block_sparse_moe.experts.3.w2.weight": "model-00002-of-00004.safetensors", "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.10.block_sparse_moe.experts.0.w3.weight": "model-00002-of-00004.safetensors", "model.layers.10.block_sparse_moe.experts.1.w3.weight": "model-00002-of-00004.safetensors", "model.layers.10.block_sparse_moe.experts.2.w3.weight": "model-00002-of-00004.safetensors", "model.layers.10.block_sparse_moe.experts.3.w3.weight": "model-00002-of-00004.safetensors", "model.layers.10.block_sparse_moe.experts.0.w1.weight": "model-00002-of-00004.safetensors", "model.layers.10.block_sparse_moe.experts.1.w1.weight": "model-00002-of-00004.safetensors", "model.layers.10.block_sparse_moe.experts.2.w1.weight": "model-00002-of-00004.safetensors", "model.layers.10.block_sparse_moe.experts.3.w1.weight": "model-00002-of-00004.safetensors", "model.layers.10.block_sparse_moe.experts.0.w2.weight": "model-00002-of-00004.safetensors", "model.layers.10.block_sparse_moe.experts.1.w2.weight": "model-00002-of-00004.safetensors", "model.layers.10.block_sparse_moe.experts.2.w2.weight": "model-00002-of-00004.safetensors", "model.layers.10.block_sparse_moe.experts.3.w2.weight": "model-00002-of-00004.safetensors", "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.11.self_attn.k_proj.weight": 
"model-00002-of-00004.safetensors", "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.11.block_sparse_moe.experts.0.w3.weight": "model-00002-of-00004.safetensors", "model.layers.11.block_sparse_moe.experts.1.w3.weight": "model-00002-of-00004.safetensors", "model.layers.11.block_sparse_moe.experts.2.w3.weight": "model-00002-of-00004.safetensors", "model.layers.11.block_sparse_moe.experts.3.w3.weight": "model-00002-of-00004.safetensors", "model.layers.11.block_sparse_moe.experts.0.w1.weight": "model-00002-of-00004.safetensors", "model.layers.11.block_sparse_moe.experts.1.w1.weight": "model-00002-of-00004.safetensors", "model.layers.11.block_sparse_moe.experts.2.w1.weight": "model-00002-of-00004.safetensors", "model.layers.11.block_sparse_moe.experts.3.w1.weight": "model-00002-of-00004.safetensors", "model.layers.11.block_sparse_moe.experts.0.w2.weight": "model-00002-of-00004.safetensors", "model.layers.11.block_sparse_moe.experts.1.w2.weight": "model-00002-of-00004.safetensors", "model.layers.11.block_sparse_moe.experts.2.w2.weight": "model-00002-of-00004.safetensors", "model.layers.11.block_sparse_moe.experts.3.w2.weight": "model-00002-of-00004.safetensors", "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.12.block_sparse_moe.experts.0.w3.weight": "model-00002-of-00004.safetensors", "model.layers.12.block_sparse_moe.experts.1.w3.weight": "model-00002-of-00004.safetensors", "model.layers.12.block_sparse_moe.experts.2.w3.weight": "model-00002-of-00004.safetensors", "model.layers.12.block_sparse_moe.experts.3.w3.weight": "model-00002-of-00004.safetensors", "model.layers.12.block_sparse_moe.experts.0.w1.weight": "model-00002-of-00004.safetensors", "model.layers.12.block_sparse_moe.experts.1.w1.weight": "model-00002-of-00004.safetensors", "model.layers.12.block_sparse_moe.experts.2.w1.weight": "model-00003-of-00004.safetensors", "model.layers.12.block_sparse_moe.experts.3.w1.weight": "model-00003-of-00004.safetensors", "model.layers.12.block_sparse_moe.experts.0.w2.weight": "model-00003-of-00004.safetensors", "model.layers.12.block_sparse_moe.experts.1.w2.weight": "model-00003-of-00004.safetensors", "model.layers.12.block_sparse_moe.experts.2.w2.weight": "model-00003-of-00004.safetensors", "model.layers.12.block_sparse_moe.experts.3.w2.weight": "model-00003-of-00004.safetensors", "model.layers.13.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.13.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", "model.layers.13.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.13.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.13.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.13.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.13.block_sparse_moe.experts.0.w3.weight": "model-00003-of-00004.safetensors", 
"model.layers.13.block_sparse_moe.experts.1.w3.weight": "model-00003-of-00004.safetensors", "model.layers.13.block_sparse_moe.experts.2.w3.weight": "model-00003-of-00004.safetensors", "model.layers.13.block_sparse_moe.experts.3.w3.weight": "model-00003-of-00004.safetensors", "model.layers.13.block_sparse_moe.experts.0.w1.weight": "model-00003-of-00004.safetensors", "model.layers.13.block_sparse_moe.experts.1.w1.weight": "model-00003-of-00004.safetensors", "model.layers.13.block_sparse_moe.experts.2.w1.weight": "model-00003-of-00004.safetensors", "model.layers.13.block_sparse_moe.experts.3.w1.weight": "model-00003-of-00004.safetensors", "model.layers.13.block_sparse_moe.experts.0.w2.weight": "model-00003-of-00004.safetensors", "model.layers.13.block_sparse_moe.experts.1.w2.weight": "model-00003-of-00004.safetensors", "model.layers.13.block_sparse_moe.experts.2.w2.weight": "model-00003-of-00004.safetensors", "model.layers.13.block_sparse_moe.experts.3.w2.weight": "model-00003-of-00004.safetensors", "model.layers.14.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.14.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", "model.layers.14.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.14.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.14.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.14.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.14.block_sparse_moe.experts.0.w3.weight": "model-00003-of-00004.safetensors", "model.layers.14.block_sparse_moe.experts.1.w3.weight": "model-00003-of-00004.safetensors", "model.layers.14.block_sparse_moe.experts.2.w3.weight": "model-00003-of-00004.safetensors", "model.layers.14.block_sparse_moe.experts.3.w3.weight": "model-00003-of-00004.safetensors", "model.layers.14.block_sparse_moe.experts.0.w1.weight": "model-00003-of-00004.safetensors", "model.layers.14.block_sparse_moe.experts.1.w1.weight": "model-00003-of-00004.safetensors", "model.layers.14.block_sparse_moe.experts.2.w1.weight": "model-00003-of-00004.safetensors", "model.layers.14.block_sparse_moe.experts.3.w1.weight": "model-00003-of-00004.safetensors", "model.layers.14.block_sparse_moe.experts.0.w2.weight": "model-00003-of-00004.safetensors", "model.layers.14.block_sparse_moe.experts.1.w2.weight": "model-00003-of-00004.safetensors", "model.layers.14.block_sparse_moe.experts.2.w2.weight": "model-00003-of-00004.safetensors", "model.layers.14.block_sparse_moe.experts.3.w2.weight": "model-00003-of-00004.safetensors", "model.layers.15.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.15.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", "model.layers.15.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.15.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.15.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.15.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.15.block_sparse_moe.experts.0.w3.weight": "model-00003-of-00004.safetensors", "model.layers.15.block_sparse_moe.experts.1.w3.weight": "model-00003-of-00004.safetensors", "model.layers.15.block_sparse_moe.experts.2.w3.weight": "model-00003-of-00004.safetensors", "model.layers.15.block_sparse_moe.experts.3.w3.weight": "model-00003-of-00004.safetensors", "model.layers.15.block_sparse_moe.experts.0.w1.weight": "model-00003-of-00004.safetensors", 
"model.layers.15.block_sparse_moe.experts.1.w1.weight": "model-00003-of-00004.safetensors", "model.layers.15.block_sparse_moe.experts.2.w1.weight": "model-00003-of-00004.safetensors", "model.layers.15.block_sparse_moe.experts.3.w1.weight": "model-00003-of-00004.safetensors", "model.layers.15.block_sparse_moe.experts.0.w2.weight": "model-00003-of-00004.safetensors", "model.layers.15.block_sparse_moe.experts.1.w2.weight": "model-00003-of-00004.safetensors", "model.layers.15.block_sparse_moe.experts.2.w2.weight": "model-00003-of-00004.safetensors", "model.layers.15.block_sparse_moe.experts.3.w2.weight": "model-00003-of-00004.safetensors", "model.layers.16.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.16.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", "model.layers.16.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.16.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.16.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.16.block_sparse_moe.experts.0.w3.weight": "model-00003-of-00004.safetensors", "model.layers.16.block_sparse_moe.experts.1.w3.weight": "model-00003-of-00004.safetensors", "model.layers.16.block_sparse_moe.experts.2.w3.weight": "model-00003-of-00004.safetensors", "model.layers.16.block_sparse_moe.experts.3.w3.weight": "model-00003-of-00004.safetensors", "model.layers.16.block_sparse_moe.experts.0.w1.weight": "model-00003-of-00004.safetensors", "model.layers.16.block_sparse_moe.experts.1.w1.weight": "model-00003-of-00004.safetensors", "model.layers.16.block_sparse_moe.experts.2.w1.weight": "model-00003-of-00004.safetensors", "model.layers.16.block_sparse_moe.experts.3.w1.weight": "model-00003-of-00004.safetensors", "model.layers.16.block_sparse_moe.experts.0.w2.weight": "model-00003-of-00004.safetensors", "model.layers.16.block_sparse_moe.experts.1.w2.weight": "model-00003-of-00004.safetensors", "model.layers.16.block_sparse_moe.experts.2.w2.weight": "model-00003-of-00004.safetensors", "model.layers.16.block_sparse_moe.experts.3.w2.weight": "model-00003-of-00004.safetensors", "model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", "model.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.17.block_sparse_moe.experts.0.w3.weight": "model-00003-of-00004.safetensors", "model.layers.17.block_sparse_moe.experts.1.w3.weight": "model-00003-of-00004.safetensors", "model.layers.17.block_sparse_moe.experts.2.w3.weight": "model-00003-of-00004.safetensors", "model.layers.17.block_sparse_moe.experts.3.w3.weight": "model-00003-of-00004.safetensors", "model.layers.17.block_sparse_moe.experts.0.w1.weight": "model-00003-of-00004.safetensors", "model.layers.17.block_sparse_moe.experts.1.w1.weight": "model-00003-of-00004.safetensors", "model.layers.17.block_sparse_moe.experts.2.w1.weight": "model-00003-of-00004.safetensors", "model.layers.17.block_sparse_moe.experts.3.w1.weight": "model-00003-of-00004.safetensors", "model.layers.17.block_sparse_moe.experts.0.w2.weight": "model-00003-of-00004.safetensors", 
"model.layers.17.block_sparse_moe.experts.1.w2.weight": "model-00003-of-00004.safetensors", "model.layers.17.block_sparse_moe.experts.2.w2.weight": "model-00003-of-00004.safetensors", "model.layers.17.block_sparse_moe.experts.3.w2.weight": "model-00003-of-00004.safetensors", "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.18.block_sparse_moe.experts.0.w3.weight": "model-00003-of-00004.safetensors", "model.layers.18.block_sparse_moe.experts.1.w3.weight": "model-00003-of-00004.safetensors", "model.layers.18.block_sparse_moe.experts.2.w3.weight": "model-00003-of-00004.safetensors", "model.layers.18.block_sparse_moe.experts.3.w3.weight": "model-00003-of-00004.safetensors", "model.layers.18.block_sparse_moe.experts.0.w1.weight": "model-00003-of-00004.safetensors", "model.layers.18.block_sparse_moe.experts.1.w1.weight": "model-00003-of-00004.safetensors", "model.layers.18.block_sparse_moe.experts.2.w1.weight": "model-00003-of-00004.safetensors", "model.layers.18.block_sparse_moe.experts.3.w1.weight": "model-00003-of-00004.safetensors", "model.layers.18.block_sparse_moe.experts.0.w2.weight": "model-00003-of-00004.safetensors", "model.layers.18.block_sparse_moe.experts.1.w2.weight": "model-00003-of-00004.safetensors", "model.layers.18.block_sparse_moe.experts.2.w2.weight": "model-00003-of-00004.safetensors", "model.layers.18.block_sparse_moe.experts.3.w2.weight": "model-00003-of-00004.safetensors", "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.19.block_sparse_moe.experts.0.w3.weight": "model-00003-of-00004.safetensors", "model.layers.19.block_sparse_moe.experts.1.w3.weight": "model-00003-of-00004.safetensors", "model.layers.19.block_sparse_moe.experts.2.w3.weight": "model-00004-of-00004.safetensors", "model.layers.19.block_sparse_moe.experts.3.w3.weight": "model-00004-of-00004.safetensors", "model.layers.19.block_sparse_moe.experts.0.w1.weight": "model-00004-of-00004.safetensors", "model.layers.19.block_sparse_moe.experts.1.w1.weight": "model-00004-of-00004.safetensors", "model.layers.19.block_sparse_moe.experts.2.w1.weight": "model-00004-of-00004.safetensors", "model.layers.19.block_sparse_moe.experts.3.w1.weight": "model-00004-of-00004.safetensors", "model.layers.19.block_sparse_moe.experts.0.w2.weight": "model-00004-of-00004.safetensors", "model.layers.19.block_sparse_moe.experts.1.w2.weight": "model-00004-of-00004.safetensors", "model.layers.19.block_sparse_moe.experts.2.w2.weight": "model-00004-of-00004.safetensors", "model.layers.19.block_sparse_moe.experts.3.w2.weight": "model-00004-of-00004.safetensors", "model.layers.20.input_layernorm.weight": "model-00004-of-00004.safetensors", 
"model.layers.20.self_attn.q_proj.weight": "model-00004-of-00004.safetensors", "model.layers.20.self_attn.k_proj.weight": "model-00004-of-00004.safetensors", "model.layers.20.self_attn.v_proj.weight": "model-00004-of-00004.safetensors", "model.layers.20.self_attn.o_proj.weight": "model-00004-of-00004.safetensors", "model.layers.20.post_attention_layernorm.weight": "model-00004-of-00004.safetensors", "model.layers.20.block_sparse_moe.experts.0.w3.weight": "model-00004-of-00004.safetensors", "model.layers.20.block_sparse_moe.experts.1.w3.weight": "model-00004-of-00004.safetensors", "model.layers.20.block_sparse_moe.experts.2.w3.weight": "model-00004-of-00004.safetensors", "model.layers.20.block_sparse_moe.experts.3.w3.weight": "model-00004-of-00004.safetensors", "model.layers.20.block_sparse_moe.experts.0.w1.weight": "model-00004-of-00004.safetensors", "model.layers.20.block_sparse_moe.experts.1.w1.weight": "model-00004-of-00004.safetensors", "model.layers.20.block_sparse_moe.experts.2.w1.weight": "model-00004-of-00004.safetensors", "model.layers.20.block_sparse_moe.experts.3.w1.weight": "model-00004-of-00004.safetensors", "model.layers.20.block_sparse_moe.experts.0.w2.weight": "model-00004-of-00004.safetensors", "model.layers.20.block_sparse_moe.experts.1.w2.weight": "model-00004-of-00004.safetensors", "model.layers.20.block_sparse_moe.experts.2.w2.weight": "model-00004-of-00004.safetensors", "model.layers.20.block_sparse_moe.experts.3.w2.weight": "model-00004-of-00004.safetensors", "model.layers.21.input_layernorm.weight": "model-00004-of-00004.safetensors", "model.layers.21.self_attn.q_proj.weight": "model-00004-of-00004.safetensors", "model.layers.21.self_attn.k_proj.weight": "model-00004-of-00004.safetensors", "model.layers.21.self_attn.v_proj.weight": "model-00004-of-00004.safetensors", "model.layers.21.self_attn.o_proj.weight": "model-00004-of-00004.safetensors", "model.layers.21.post_attention_layernorm.weight": "model-00004-of-00004.safetensors", "model.layers.21.block_sparse_moe.experts.0.w3.weight": "model-00004-of-00004.safetensors", "model.layers.21.block_sparse_moe.experts.1.w3.weight": "model-00004-of-00004.safetensors", "model.layers.21.block_sparse_moe.experts.2.w3.weight": "model-00004-of-00004.safetensors", "model.layers.21.block_sparse_moe.experts.3.w3.weight": "model-00004-of-00004.safetensors", "model.layers.21.block_sparse_moe.experts.0.w1.weight": "model-00004-of-00004.safetensors", "model.layers.21.block_sparse_moe.experts.1.w1.weight": "model-00004-of-00004.safetensors", "model.layers.21.block_sparse_moe.experts.2.w1.weight": "model-00004-of-00004.safetensors", "model.layers.21.block_sparse_moe.experts.3.w1.weight": "model-00004-of-00004.safetensors", "model.layers.21.block_sparse_moe.experts.0.w2.weight": "model-00004-of-00004.safetensors", "model.layers.21.block_sparse_moe.experts.1.w2.weight": "model-00004-of-00004.safetensors", "model.layers.21.block_sparse_moe.experts.2.w2.weight": "model-00004-of-00004.safetensors", "model.layers.21.block_sparse_moe.experts.3.w2.weight": "model-00004-of-00004.safetensors", "model.layers.0.block_sparse_moe.gate.weight": "model-00004-of-00004.safetensors", "model.layers.1.block_sparse_moe.gate.weight": "model-00004-of-00004.safetensors", "model.layers.2.block_sparse_moe.gate.weight": "model-00004-of-00004.safetensors", "model.layers.3.block_sparse_moe.gate.weight": "model-00004-of-00004.safetensors", "model.layers.4.block_sparse_moe.gate.weight": "model-00004-of-00004.safetensors", "model.layers.5.block_sparse_moe.gate.weight": 
"model-00004-of-00004.safetensors", "model.layers.6.block_sparse_moe.gate.weight": "model-00004-of-00004.safetensors", "model.layers.7.block_sparse_moe.gate.weight": "model-00004-of-00004.safetensors", "model.layers.8.block_sparse_moe.gate.weight": "model-00004-of-00004.safetensors", "model.layers.9.block_sparse_moe.gate.weight": "model-00004-of-00004.safetensors", "model.layers.10.block_sparse_moe.gate.weight": "model-00004-of-00004.safetensors", "model.layers.11.block_sparse_moe.gate.weight": "model-00004-of-00004.safetensors", "model.layers.12.block_sparse_moe.gate.weight": "model-00004-of-00004.safetensors", "model.layers.13.block_sparse_moe.gate.weight": "model-00004-of-00004.safetensors", "model.layers.14.block_sparse_moe.gate.weight": "model-00004-of-00004.safetensors", "model.layers.15.block_sparse_moe.gate.weight": "model-00004-of-00004.safetensors", "model.layers.16.block_sparse_moe.gate.weight": "model-00004-of-00004.safetensors", "model.layers.17.block_sparse_moe.gate.weight": "model-00004-of-00004.safetensors", "model.layers.18.block_sparse_moe.gate.weight": "model-00004-of-00004.safetensors", "model.layers.19.block_sparse_moe.gate.weight": "model-00004-of-00004.safetensors", "model.layers.20.block_sparse_moe.gate.weight": "model-00004-of-00004.safetensors", "model.layers.21.block_sparse_moe.gate.weight": "model-00004-of-00004.safetensors"}}
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,60 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32000": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32001": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "legacy": false,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<s>",
+   "padding_side": "left",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "trust_remote_code": false,
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false,
+   "use_fast": true
+ }