legraphista committed on
Commit
e60f166
1 Parent(s): dcfb8fe

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,135 @@
1
+ ---
2
+ inference: false
3
+ license: other
4
+ license_name: mnpl
5
+ license_link: https://mistral.ai/licences/MNPL-0.1.md
6
+ tags:
7
+ - code
8
+ - FIM
9
+ - FIM fix
10
+ - tokenizer fix
11
+ language:
12
+ - code
13
+ base_model: mistralai/Codestral-22B-v0.1
14
+ ---
15
+
16
+ Converted using [this script](https://huggingface.co/legraphista/Codestral-22B-v0.1-hf-FIM-fix/blob/main/convert_mistral_weights_to_hf-22B.py).
17
+ The initial version of [mistralai/Codestral-22B-v0.1](https://huggingface.co/mistralai/Codestral-22B-v0.1) had missing tokens in its vocabulary (see the [discussion](https://huggingface.co/mistralai/Codestral-22B-v0.1/discussions/10)).
18
+
19
+ This conversion is based on the newest version containing the fix and adds the following special tokens (a quick sanity check is sketched after the list):
20
+ - `[INST]`
21
+ - `[/INST]`
22
+ - `[IMG]`
23
+ - `[PREFIX]`
24
+ - `[SUFFIX]`
25
+ - `[MIDDLE]`
26
+
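+ A quick way to confirm the fix landed in this conversion is to load the tokenizer with `transformers` and check that each of the tokens above resolves to a real id rather than the unknown token. This is a minimal sketch, assuming `transformers` is installed:
+
+ ```py
+ from transformers import AutoTokenizer
+
+ # This repository (the converted checkpoint with the tokenizer fix).
+ tokenizer = AutoTokenizer.from_pretrained("legraphista/Codestral-22B-v0.1-hf-FIM-fix")
+
+ for token in ["[INST]", "[/INST]", "[IMG]", "[PREFIX]", "[SUFFIX]", "[MIDDLE]"]:
+     token_id = tokenizer.convert_tokens_to_ids(token)
+     print(f"{token:>10} -> id {token_id}")
+     # A token missing from the vocab would fall back to the unknown-token id (0 here).
+     assert token_id != tokenizer.unk_token_id, f"{token} is missing from the vocab"
+ ```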
27
+ ---
28
+
29
+ # Model Card for Codestral-22B-v0.1
30
+
31
+ Codestral-22B-v0.1 is trained on a diverse dataset of 80+ programming languages, including the most popular ones, such as Python, Java, C, C++, JavaScript, and Bash (more details in the [Blogpost](https://mistral.ai/news/codestral/)). The model can be queried:
32
+ - As an instruct model, for instance to answer any question about a code snippet (write documentation, explain, factorize) or to generate code following specific instructions
33
+ - As a Fill-in-the-Middle (FIM) model, to predict the middle tokens between a prefix and a suffix (very useful for software development add-ons such as those in VS Code)
34
+
35
+
36
+ ## Installation
37
+
38
+ It is recommended to use `mistralai/Codestral-22B-v0.1` with [mistral-inference](https://github.com/mistralai/mistral-inference).
39
+
40
+ ```
41
+ pip install mistral_inference
42
+ ```
43
+
44
+ ## Download
45
+
46
+ ```py
47
+ from huggingface_hub import snapshot_download
48
+ from pathlib import Path
49
+
50
+ mistral_models_path = Path.home().joinpath('mistral_models', 'Codestral-22B-v0.1')
51
+ mistral_models_path.mkdir(parents=True, exist_ok=True)
52
+
53
+ snapshot_download(repo_id="mistralai/Codestral-22B-v0.1", allow_patterns=["params.json", "consolidated.safetensors", "tokenizer.model.v3"], local_dir=mistral_models_path)
54
+ ```
55
+
56
+ ### Chat
57
+
58
+ After installing `mistral_inference`, a `mistral-chat` CLI command should be available in your environment.
59
+
60
+ ```
61
+ mistral-chat $HOME/mistral_models/Codestral-22B-v0.1 --instruct --max_tokens 256
62
+ ```
63
+
64
+ Running this command will generate an answer to "Write me a function that computes fibonacci in Rust" and should give something along the following lines:
65
+
66
+ ```
67
+ Sure, here's a simple implementation of a function that computes the Fibonacci sequence in Rust. This function takes an integer `n` as an argument and returns the `n`th Fibonacci number.
68
+
69
+ fn fibonacci(n: u32) -> u32 {
70
+ match n {
71
+ 0 => 0,
72
+ 1 => 1,
73
+ _ => fibonacci(n - 1) + fibonacci(n - 2),
74
+ }
75
+ }
76
+
77
+ fn main() {
78
+ let n = 10;
79
+ println!("The {}th Fibonacci number is: {}", n, fibonacci(n));
80
+ }
81
+
82
+ This function uses recursion to calculate the Fibonacci number. However, it's not the most efficient solution because it performs a lot of redundant calculations. A more efficient solution would use a loop to iteratively calculate the Fibonacci numbers.
83
+ ```
84
+
85
+
86
+ ### Fill-in-the-middle (FIM)
87
+
88
+ After installing `mistral_inference`, run `pip install --upgrade mistral_common` to make sure you have `mistral_common >= 1.2` installed, then:
89
+
90
+ ```py
91
+ from mistral_inference.model import Transformer
92
+ from mistral_inference.generate import generate
93
+ from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
94
+ from mistral_common.tokens.instruct.request import FIMRequest
95
+
96
+ tokenizer = MistralTokenizer.v3()
97
+ model = Transformer.from_folder("~/mistral_models/Codestral-22B-v0.1")
98
+
99
+ prefix = """def add("""
100
+ suffix = """ return sum"""
101
+
102
+ request = FIMRequest(prompt=prefix, suffix=suffix)
103
+
104
+ tokens = tokenizer.encode_fim(request).tokens
105
+
106
+ out_tokens, _ = generate([tokens], model, max_tokens=256, temperature=0.0, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id)
107
+ result = tokenizer.decode(out_tokens[0])
108
+
109
+ middle = result.split(suffix)[0].strip()
110
+ print(middle)
111
+ ```
112
+
113
+ This should give something along the following lines:
114
+
115
+ ```
116
+ num1, num2):
117
+
118
+ # Add two numbers
119
+ sum = num1 + num2
120
+
121
+ # return the sum
122
+ ```
123
+
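+ Since this conversion restores the FIM tokens, the same completion can also be attempted directly with `transformers`. The sketch below is an illustration only: it assumes the suffix-first prompt layout (`[SUFFIX]` + suffix + `[PREFIX]` + prefix) that `mistral_common`'s `encode_fim` produces, greedy decoding, and a machine with enough GPU memory and `accelerate` installed; if in doubt, compare the encoded ids against `tokenizer.encode_fim(request).tokens` from the snippet above.
+
+ ```py
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ repo = "legraphista/Codestral-22B-v0.1-hf-FIM-fix"  # this repository
+ tokenizer = AutoTokenizer.from_pretrained(repo)
+ model = AutoModelForCausalLM.from_pretrained(repo, torch_dtype=torch.bfloat16, device_map="auto")
+
+ prefix = "def add("
+ suffix = " return sum"
+
+ # Assumed FIM layout; the BOS token is added automatically (add_bos_token is true).
+ prompt = f"[SUFFIX]{suffix}[PREFIX]{prefix}"
+ inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+
+ out = model.generate(**inputs, max_new_tokens=128, do_sample=False)
+ middle = tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
+ print(prefix + middle)
+ ```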
124
+ ## Limitations
125
+
126
+ Codestral-22B-v0.1 does not have any moderation mechanisms. We're looking forward to engaging with the community on ways to
127
+ make the model finely respect guardrails, allowing for deployment in environments requiring moderated outputs.
128
+
129
+ ## License
130
+
131
+ Codestral-22B-v0.1 is released under the `MNPL-0.1` license.
132
+
133
+ ## The Mistral AI Team
134
+
135
+ Albert Jiang, Alexandre Sablayrolles, Alexis Tacnet, Antoine Roux, Arthur Mensch, Audrey Herblin-Stoop, Baptiste Bout, Baudouin de Monicault, Blanche Savary, Bam4d, Caroline Feldman, Devendra Singh Chaplot, Diego de las Casas, Eleonore Arcelin, Emma Bou Hanna, Etienne Metzger, Gianna Lengyel, Guillaume Bour, Guillaume Lample, Harizo Rajaona, Henri Roussez, Jean-Malo Delignon, Jia Li, Justus Murke, Kartik Khandelwal, Lawrence Stewart, Louis Martin, Louis Ternon, Lucile Saulnier, Lélio Renard Lavaud, Margaret Jennings, Marie Pellat, Marie Torelli, Marie-Anne Lachaux, Marjorie Janiewicz, Mickael Seznec, Nicolas Schuhl, Patrick von Platen, Romain Sauvestre, Pierre Stock, Sandeep Subramanian, Saurabh Garg, Sophia Yang, Szymon Antoniak, Teven Le Scao, Thibaut Lavril, Thibault Schueller, Timothée Lacroix, Théophile Gervet, Thomas Wang, Valera Nemychnikova, Wendy Shang, William El Sayed, William Marshall
config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "architectures": [
3
+ "MistralForCausalLM"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "bos_token_id": 1,
7
+ "eos_token_id": 2,
8
+ "hidden_act": "silu",
9
+ "hidden_size": 6144,
10
+ "initializer_range": 0.02,
11
+ "intermediate_size": 16384,
12
+ "max_position_embeddings": 32768,
13
+ "model_type": "mistral",
14
+ "num_attention_heads": 48,
15
+ "num_hidden_layers": 56,
16
+ "num_key_value_heads": 8,
17
+ "rms_norm_eps": 1e-05,
18
+ "rope_theta": 1000000.0,
19
+ "sliding_window": null,
20
+ "tie_word_embeddings": false,
21
+ "torch_dtype": "bfloat16",
22
+ "transformers_version": "4.41.1",
23
+ "use_cache": true,
24
+ "vocab_size": 32768
25
+ }
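For orientation, these hyperparameters line up with the 22B in the model name: a rough, hand-rolled count of the tensors listed in the weight map (embeddings, grouped-query attention projections, MLP projections, norms, and the untied LM head) gives about 22.2B parameters, and at two bytes per bfloat16 value that matches the `total_size` recorded in the safetensors index below. A minimal sketch of the arithmetic:

```py
# Rough parameter count from config.json above (grouped-query attention, untied embeddings).
hidden, inter, layers = 6144, 16384, 56
heads, kv_heads, vocab = 48, 8, 32768

head_dim = hidden // heads              # 128
kv_dim = kv_heads * head_dim            # 1024: output width of k_proj / v_proj

attn = 2 * hidden * hidden + 2 * hidden * kv_dim   # q_proj, o_proj, k_proj, v_proj
mlp = 3 * hidden * inter                            # gate_proj, up_proj, down_proj
norms = 2 * hidden                                  # input + post-attention RMSNorm

total = 2 * vocab * hidden + layers * (attn + mlp + norms) + hidden  # + final norm
print(f"{total / 1e9:.2f}B params, {2 * total:,} bytes in bfloat16")
# -> 22.25B params, 44,494,565,376 bytes (the total_size in model.safetensors.index.json)
```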
generation_config.json ADDED
@@ -0,0 +1,6 @@
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 1,
4
+ "eos_token_id": 2,
5
+ "transformers_version": "4.41.1"
6
+ }
model-00001-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6c911e66544527032c9e49f602ed0645f748045248eb8fb8ec9982866b899674
3
+ size 4882298776
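Each shard above is shown as a Git LFS pointer (spec version, sha256 oid, byte size) rather than the weights themselves; `huggingface_hub` resolves these to the real files on download. A minimal sketch for checking a locally downloaded shard against its pointer, with illustrative paths:

```py
import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Compare a downloaded file against the oid/size recorded in its LFS pointer."""
    fields = dict(line.split(" ", 1) for line in Path(pointer_path).read_text().splitlines() if line)
    expected_sha = fields["oid"].removeprefix("sha256:")
    if Path(blob_path).stat().st_size != int(fields["size"]):
        return False
    sha = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            sha.update(chunk)
    return sha.hexdigest() == expected_sha

# Illustrative paths; adjust to wherever the pointer and the downloaded shard live.
print(verify_lfs_pointer("model-00001-of-00009.safetensors.pointer",
                         "model-00001-of-00009.safetensors"))
```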
model-00002-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cc1de07197a04eaeeaa6dcb7ed6604f729ed822e92273c25c112f85c366b5696
3
+ size 4983012160
model-00003-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:90dc483e3b22d3d21a03edd588a8ffe5743b8dea33fc9f1ffc01eb1e529aedf8
3
+ size 4957821336
model-00004-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:76ee31da7cdd8fde0a257030ffdf7d3fb293935a62b8469f6dec1c1a19e14eee
3
+ size 4882323744
model-00005-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d56824727ffaf568f7a1c7770fd5cb531df71ebe143567b1cb3968aca7f98cd
3
+ size 4983012192
model-00006-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f30bb3fdbcad8d1c00e0b421908bebc6cb5544669cd3c916ae592acb7263ae4
3
+ size 4957821336
model-00007-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:54eb704485dce4f8c7c245169d25f394ea08dec1562a1ab981715f294ef93314
3
+ size 4882323744
model-00008-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cfbe26e02d475904ecc92cbe54a614607156aabed3503867d8af5023673d6374
3
+ size 4983012192
model-00009-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c834720ddae75dc683e52284ffe27ea35f48eb2c5500c71025925fe0dd398a8c
3
+ size 4982999056
model.safetensors.index.json ADDED
@@ -0,0 +1,514 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 44494565376
4
+ },
5
+ "weight_map": {
6
+ "lm_head.weight": "model-00009-of-00009.safetensors",
7
+ "model.embed_tokens.weight": "model-00001-of-00009.safetensors",
8
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00009.safetensors",
9
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00009.safetensors",
10
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00009.safetensors",
11
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00009.safetensors",
12
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
13
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
14
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
15
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
16
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
17
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00009.safetensors",
18
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00009.safetensors",
19
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00009.safetensors",
20
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00009.safetensors",
21
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
22
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
23
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
24
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
25
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
26
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00009.safetensors",
27
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
28
+ "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00009.safetensors",
29
+ "model.layers.10.mlp.up_proj.weight": "model-00002-of-00009.safetensors",
30
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
31
+ "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
32
+ "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
33
+ "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
34
+ "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
35
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00009.safetensors",
36
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
37
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00009.safetensors",
38
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00009.safetensors",
39
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
40
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
41
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
42
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
43
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
44
+ "model.layers.12.input_layernorm.weight": "model-00003-of-00009.safetensors",
45
+ "model.layers.12.mlp.down_proj.weight": "model-00003-of-00009.safetensors",
46
+ "model.layers.12.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
47
+ "model.layers.12.mlp.up_proj.weight": "model-00003-of-00009.safetensors",
48
+ "model.layers.12.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
49
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
50
+ "model.layers.12.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
51
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
52
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
53
+ "model.layers.13.input_layernorm.weight": "model-00003-of-00009.safetensors",
54
+ "model.layers.13.mlp.down_proj.weight": "model-00003-of-00009.safetensors",
55
+ "model.layers.13.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
56
+ "model.layers.13.mlp.up_proj.weight": "model-00003-of-00009.safetensors",
57
+ "model.layers.13.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
58
+ "model.layers.13.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
59
+ "model.layers.13.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
60
+ "model.layers.13.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
61
+ "model.layers.13.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
62
+ "model.layers.14.input_layernorm.weight": "model-00003-of-00009.safetensors",
63
+ "model.layers.14.mlp.down_proj.weight": "model-00003-of-00009.safetensors",
64
+ "model.layers.14.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
65
+ "model.layers.14.mlp.up_proj.weight": "model-00003-of-00009.safetensors",
66
+ "model.layers.14.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
67
+ "model.layers.14.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
68
+ "model.layers.14.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
69
+ "model.layers.14.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
70
+ "model.layers.14.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
71
+ "model.layers.15.input_layernorm.weight": "model-00003-of-00009.safetensors",
72
+ "model.layers.15.mlp.down_proj.weight": "model-00003-of-00009.safetensors",
73
+ "model.layers.15.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
74
+ "model.layers.15.mlp.up_proj.weight": "model-00003-of-00009.safetensors",
75
+ "model.layers.15.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
76
+ "model.layers.15.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
77
+ "model.layers.15.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
78
+ "model.layers.15.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
79
+ "model.layers.15.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
80
+ "model.layers.16.input_layernorm.weight": "model-00003-of-00009.safetensors",
81
+ "model.layers.16.mlp.down_proj.weight": "model-00003-of-00009.safetensors",
82
+ "model.layers.16.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
83
+ "model.layers.16.mlp.up_proj.weight": "model-00003-of-00009.safetensors",
84
+ "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
85
+ "model.layers.16.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
86
+ "model.layers.16.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
87
+ "model.layers.16.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
88
+ "model.layers.16.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
89
+ "model.layers.17.input_layernorm.weight": "model-00003-of-00009.safetensors",
90
+ "model.layers.17.mlp.down_proj.weight": "model-00003-of-00009.safetensors",
91
+ "model.layers.17.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
92
+ "model.layers.17.mlp.up_proj.weight": "model-00003-of-00009.safetensors",
93
+ "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
94
+ "model.layers.17.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
95
+ "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
96
+ "model.layers.17.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
97
+ "model.layers.17.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
98
+ "model.layers.18.input_layernorm.weight": "model-00004-of-00009.safetensors",
99
+ "model.layers.18.mlp.down_proj.weight": "model-00004-of-00009.safetensors",
100
+ "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
101
+ "model.layers.18.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
102
+ "model.layers.18.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
103
+ "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
104
+ "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
105
+ "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
106
+ "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
107
+ "model.layers.19.input_layernorm.weight": "model-00004-of-00009.safetensors",
108
+ "model.layers.19.mlp.down_proj.weight": "model-00004-of-00009.safetensors",
109
+ "model.layers.19.mlp.gate_proj.weight": "model-00004-of-00009.safetensors",
110
+ "model.layers.19.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
111
+ "model.layers.19.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
112
+ "model.layers.19.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
113
+ "model.layers.19.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
114
+ "model.layers.19.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
115
+ "model.layers.19.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
116
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00009.safetensors",
117
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00009.safetensors",
118
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00009.safetensors",
119
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00009.safetensors",
120
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
121
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
122
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
123
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
124
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
125
+ "model.layers.20.input_layernorm.weight": "model-00004-of-00009.safetensors",
126
+ "model.layers.20.mlp.down_proj.weight": "model-00004-of-00009.safetensors",
127
+ "model.layers.20.mlp.gate_proj.weight": "model-00004-of-00009.safetensors",
128
+ "model.layers.20.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
129
+ "model.layers.20.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
130
+ "model.layers.20.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
131
+ "model.layers.20.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
132
+ "model.layers.20.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
133
+ "model.layers.20.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
134
+ "model.layers.21.input_layernorm.weight": "model-00004-of-00009.safetensors",
135
+ "model.layers.21.mlp.down_proj.weight": "model-00004-of-00009.safetensors",
136
+ "model.layers.21.mlp.gate_proj.weight": "model-00004-of-00009.safetensors",
137
+ "model.layers.21.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
138
+ "model.layers.21.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
139
+ "model.layers.21.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
140
+ "model.layers.21.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
141
+ "model.layers.21.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
142
+ "model.layers.21.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
143
+ "model.layers.22.input_layernorm.weight": "model-00004-of-00009.safetensors",
144
+ "model.layers.22.mlp.down_proj.weight": "model-00004-of-00009.safetensors",
145
+ "model.layers.22.mlp.gate_proj.weight": "model-00004-of-00009.safetensors",
146
+ "model.layers.22.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
147
+ "model.layers.22.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
148
+ "model.layers.22.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
149
+ "model.layers.22.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
150
+ "model.layers.22.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
151
+ "model.layers.22.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
152
+ "model.layers.23.input_layernorm.weight": "model-00004-of-00009.safetensors",
153
+ "model.layers.23.mlp.down_proj.weight": "model-00004-of-00009.safetensors",
154
+ "model.layers.23.mlp.gate_proj.weight": "model-00004-of-00009.safetensors",
155
+ "model.layers.23.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
156
+ "model.layers.23.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
157
+ "model.layers.23.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
158
+ "model.layers.23.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
159
+ "model.layers.23.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
160
+ "model.layers.23.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
161
+ "model.layers.24.input_layernorm.weight": "model-00005-of-00009.safetensors",
162
+ "model.layers.24.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
163
+ "model.layers.24.mlp.gate_proj.weight": "model-00004-of-00009.safetensors",
164
+ "model.layers.24.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
165
+ "model.layers.24.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
166
+ "model.layers.24.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
167
+ "model.layers.24.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
168
+ "model.layers.24.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
169
+ "model.layers.24.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
170
+ "model.layers.25.input_layernorm.weight": "model-00005-of-00009.safetensors",
171
+ "model.layers.25.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
172
+ "model.layers.25.mlp.gate_proj.weight": "model-00005-of-00009.safetensors",
173
+ "model.layers.25.mlp.up_proj.weight": "model-00005-of-00009.safetensors",
174
+ "model.layers.25.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
175
+ "model.layers.25.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
176
+ "model.layers.25.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
177
+ "model.layers.25.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
178
+ "model.layers.25.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
179
+ "model.layers.26.input_layernorm.weight": "model-00005-of-00009.safetensors",
180
+ "model.layers.26.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
181
+ "model.layers.26.mlp.gate_proj.weight": "model-00005-of-00009.safetensors",
182
+ "model.layers.26.mlp.up_proj.weight": "model-00005-of-00009.safetensors",
183
+ "model.layers.26.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
184
+ "model.layers.26.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
185
+ "model.layers.26.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
186
+ "model.layers.26.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
187
+ "model.layers.26.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
188
+ "model.layers.27.input_layernorm.weight": "model-00005-of-00009.safetensors",
189
+ "model.layers.27.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
190
+ "model.layers.27.mlp.gate_proj.weight": "model-00005-of-00009.safetensors",
191
+ "model.layers.27.mlp.up_proj.weight": "model-00005-of-00009.safetensors",
192
+ "model.layers.27.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
193
+ "model.layers.27.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
194
+ "model.layers.27.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
195
+ "model.layers.27.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
196
+ "model.layers.27.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
197
+ "model.layers.28.input_layernorm.weight": "model-00005-of-00009.safetensors",
198
+ "model.layers.28.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
199
+ "model.layers.28.mlp.gate_proj.weight": "model-00005-of-00009.safetensors",
200
+ "model.layers.28.mlp.up_proj.weight": "model-00005-of-00009.safetensors",
201
+ "model.layers.28.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
202
+ "model.layers.28.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
203
+ "model.layers.28.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
204
+ "model.layers.28.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
205
+ "model.layers.28.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
206
+ "model.layers.29.input_layernorm.weight": "model-00005-of-00009.safetensors",
207
+ "model.layers.29.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
208
+ "model.layers.29.mlp.gate_proj.weight": "model-00005-of-00009.safetensors",
209
+ "model.layers.29.mlp.up_proj.weight": "model-00005-of-00009.safetensors",
210
+ "model.layers.29.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
211
+ "model.layers.29.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
212
+ "model.layers.29.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
213
+ "model.layers.29.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
214
+ "model.layers.29.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
215
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00009.safetensors",
216
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00009.safetensors",
217
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00009.safetensors",
218
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00009.safetensors",
219
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
220
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
221
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
222
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
223
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
224
+ "model.layers.30.input_layernorm.weight": "model-00005-of-00009.safetensors",
225
+ "model.layers.30.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
226
+ "model.layers.30.mlp.gate_proj.weight": "model-00005-of-00009.safetensors",
227
+ "model.layers.30.mlp.up_proj.weight": "model-00005-of-00009.safetensors",
228
+ "model.layers.30.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
229
+ "model.layers.30.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
230
+ "model.layers.30.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
231
+ "model.layers.30.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
232
+ "model.layers.30.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
233
+ "model.layers.31.input_layernorm.weight": "model-00006-of-00009.safetensors",
234
+ "model.layers.31.mlp.down_proj.weight": "model-00006-of-00009.safetensors",
235
+ "model.layers.31.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
236
+ "model.layers.31.mlp.up_proj.weight": "model-00006-of-00009.safetensors",
237
+ "model.layers.31.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
238
+ "model.layers.31.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
239
+ "model.layers.31.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
240
+ "model.layers.31.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
241
+ "model.layers.31.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
242
+ "model.layers.32.input_layernorm.weight": "model-00006-of-00009.safetensors",
243
+ "model.layers.32.mlp.down_proj.weight": "model-00006-of-00009.safetensors",
244
+ "model.layers.32.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
245
+ "model.layers.32.mlp.up_proj.weight": "model-00006-of-00009.safetensors",
246
+ "model.layers.32.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
247
+ "model.layers.32.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
248
+ "model.layers.32.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
249
+ "model.layers.32.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
250
+ "model.layers.32.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
251
+ "model.layers.33.input_layernorm.weight": "model-00006-of-00009.safetensors",
252
+ "model.layers.33.mlp.down_proj.weight": "model-00006-of-00009.safetensors",
253
+ "model.layers.33.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
254
+ "model.layers.33.mlp.up_proj.weight": "model-00006-of-00009.safetensors",
255
+ "model.layers.33.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
256
+ "model.layers.33.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
257
+ "model.layers.33.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
258
+ "model.layers.33.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
259
+ "model.layers.33.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
260
+ "model.layers.34.input_layernorm.weight": "model-00006-of-00009.safetensors",
261
+ "model.layers.34.mlp.down_proj.weight": "model-00006-of-00009.safetensors",
262
+ "model.layers.34.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
263
+ "model.layers.34.mlp.up_proj.weight": "model-00006-of-00009.safetensors",
264
+ "model.layers.34.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
265
+ "model.layers.34.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
266
+ "model.layers.34.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
267
+ "model.layers.34.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
268
+ "model.layers.34.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
269
+ "model.layers.35.input_layernorm.weight": "model-00006-of-00009.safetensors",
270
+ "model.layers.35.mlp.down_proj.weight": "model-00006-of-00009.safetensors",
271
+ "model.layers.35.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
272
+ "model.layers.35.mlp.up_proj.weight": "model-00006-of-00009.safetensors",
273
+ "model.layers.35.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
274
+ "model.layers.35.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
275
+ "model.layers.35.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
276
+ "model.layers.35.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
277
+ "model.layers.35.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
278
+ "model.layers.36.input_layernorm.weight": "model-00006-of-00009.safetensors",
279
+ "model.layers.36.mlp.down_proj.weight": "model-00006-of-00009.safetensors",
280
+ "model.layers.36.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
281
+ "model.layers.36.mlp.up_proj.weight": "model-00006-of-00009.safetensors",
282
+ "model.layers.36.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
283
+ "model.layers.36.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
284
+ "model.layers.36.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
285
+ "model.layers.36.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
286
+ "model.layers.36.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
287
+ "model.layers.37.input_layernorm.weight": "model-00007-of-00009.safetensors",
288
+ "model.layers.37.mlp.down_proj.weight": "model-00007-of-00009.safetensors",
289
+ "model.layers.37.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
290
+ "model.layers.37.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
291
+ "model.layers.37.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
292
+ "model.layers.37.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
293
+ "model.layers.37.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
294
+ "model.layers.37.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
295
+ "model.layers.37.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
296
+ "model.layers.38.input_layernorm.weight": "model-00007-of-00009.safetensors",
297
+ "model.layers.38.mlp.down_proj.weight": "model-00007-of-00009.safetensors",
298
+ "model.layers.38.mlp.gate_proj.weight": "model-00007-of-00009.safetensors",
299
+ "model.layers.38.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
300
+ "model.layers.38.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
301
+ "model.layers.38.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
302
+ "model.layers.38.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
303
+ "model.layers.38.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
304
+ "model.layers.38.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
305
+ "model.layers.39.input_layernorm.weight": "model-00007-of-00009.safetensors",
306
+ "model.layers.39.mlp.down_proj.weight": "model-00007-of-00009.safetensors",
307
+ "model.layers.39.mlp.gate_proj.weight": "model-00007-of-00009.safetensors",
308
+ "model.layers.39.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
309
+ "model.layers.39.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
310
+ "model.layers.39.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
311
+ "model.layers.39.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
312
+ "model.layers.39.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
313
+ "model.layers.39.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
314
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00009.safetensors",
315
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00009.safetensors",
316
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00009.safetensors",
317
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00009.safetensors",
318
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
319
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
320
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
321
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
322
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
323
+ "model.layers.40.input_layernorm.weight": "model-00007-of-00009.safetensors",
324
+ "model.layers.40.mlp.down_proj.weight": "model-00007-of-00009.safetensors",
325
+ "model.layers.40.mlp.gate_proj.weight": "model-00007-of-00009.safetensors",
326
+ "model.layers.40.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
327
+ "model.layers.40.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
328
+ "model.layers.40.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
329
+ "model.layers.40.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
330
+ "model.layers.40.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
331
+ "model.layers.40.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
332
+ "model.layers.41.input_layernorm.weight": "model-00007-of-00009.safetensors",
333
+ "model.layers.41.mlp.down_proj.weight": "model-00007-of-00009.safetensors",
334
+ "model.layers.41.mlp.gate_proj.weight": "model-00007-of-00009.safetensors",
335
+ "model.layers.41.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
336
+ "model.layers.41.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
337
+ "model.layers.41.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
338
+ "model.layers.41.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
339
+ "model.layers.41.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
340
+ "model.layers.41.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
341
+ "model.layers.42.input_layernorm.weight": "model-00007-of-00009.safetensors",
342
+ "model.layers.42.mlp.down_proj.weight": "model-00007-of-00009.safetensors",
343
+ "model.layers.42.mlp.gate_proj.weight": "model-00007-of-00009.safetensors",
344
+ "model.layers.42.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
345
+ "model.layers.42.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
346
+ "model.layers.42.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
347
+ "model.layers.42.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
348
+ "model.layers.42.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
349
+ "model.layers.42.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
350
+ "model.layers.43.input_layernorm.weight": "model-00008-of-00009.safetensors",
351
+ "model.layers.43.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
352
+ "model.layers.43.mlp.gate_proj.weight": "model-00007-of-00009.safetensors",
353
+ "model.layers.43.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
354
+ "model.layers.43.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
355
+ "model.layers.43.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
356
+ "model.layers.43.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
357
+ "model.layers.43.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
358
+ "model.layers.43.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
359
+ "model.layers.44.input_layernorm.weight": "model-00008-of-00009.safetensors",
360
+ "model.layers.44.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
361
+ "model.layers.44.mlp.gate_proj.weight": "model-00008-of-00009.safetensors",
362
+ "model.layers.44.mlp.up_proj.weight": "model-00008-of-00009.safetensors",
363
+ "model.layers.44.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
364
+ "model.layers.44.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
365
+ "model.layers.44.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
366
+ "model.layers.44.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
367
+ "model.layers.44.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
368
+ "model.layers.45.input_layernorm.weight": "model-00008-of-00009.safetensors",
369
+ "model.layers.45.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
370
+ "model.layers.45.mlp.gate_proj.weight": "model-00008-of-00009.safetensors",
371
+ "model.layers.45.mlp.up_proj.weight": "model-00008-of-00009.safetensors",
372
+ "model.layers.45.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
373
+ "model.layers.45.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
374
+ "model.layers.45.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
375
+ "model.layers.45.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
376
+ "model.layers.45.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
377
+ "model.layers.46.input_layernorm.weight": "model-00008-of-00009.safetensors",
378
+ "model.layers.46.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
379
+ "model.layers.46.mlp.gate_proj.weight": "model-00008-of-00009.safetensors",
380
+ "model.layers.46.mlp.up_proj.weight": "model-00008-of-00009.safetensors",
381
+ "model.layers.46.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
382
+ "model.layers.46.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
383
+ "model.layers.46.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
384
+ "model.layers.46.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
385
+ "model.layers.46.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
386
+ "model.layers.47.input_layernorm.weight": "model-00008-of-00009.safetensors",
387
+ "model.layers.47.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
388
+ "model.layers.47.mlp.gate_proj.weight": "model-00008-of-00009.safetensors",
389
+ "model.layers.47.mlp.up_proj.weight": "model-00008-of-00009.safetensors",
390
+ "model.layers.47.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
391
+ "model.layers.47.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
392
+ "model.layers.47.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
393
+ "model.layers.47.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
394
+ "model.layers.47.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
395
+ "model.layers.48.input_layernorm.weight": "model-00008-of-00009.safetensors",
396
+ "model.layers.48.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
397
+ "model.layers.48.mlp.gate_proj.weight": "model-00008-of-00009.safetensors",
398
+ "model.layers.48.mlp.up_proj.weight": "model-00008-of-00009.safetensors",
399
+ "model.layers.48.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
400
+ "model.layers.48.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
401
+ "model.layers.48.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
402
+ "model.layers.48.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
403
+ "model.layers.48.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
404
+ "model.layers.49.input_layernorm.weight": "model-00008-of-00009.safetensors",
405
+ "model.layers.49.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
406
+ "model.layers.49.mlp.gate_proj.weight": "model-00008-of-00009.safetensors",
407
+ "model.layers.49.mlp.up_proj.weight": "model-00008-of-00009.safetensors",
408
+ "model.layers.49.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
409
+ "model.layers.49.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
410
+ "model.layers.49.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
411
+ "model.layers.49.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
412
+ "model.layers.49.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
413
+ "model.layers.5.input_layernorm.weight": "model-00002-of-00009.safetensors",
414
+ "model.layers.5.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
415
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00009.safetensors",
416
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00009.safetensors",
417
+ "model.layers.5.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
418
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
419
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
420
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
421
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
422
+ "model.layers.50.input_layernorm.weight": "model-00009-of-00009.safetensors",
423
+ "model.layers.50.mlp.down_proj.weight": "model-00009-of-00009.safetensors",
424
+ "model.layers.50.mlp.gate_proj.weight": "model-00009-of-00009.safetensors",
425
+ "model.layers.50.mlp.up_proj.weight": "model-00009-of-00009.safetensors",
426
+ "model.layers.50.post_attention_layernorm.weight": "model-00009-of-00009.safetensors",
427
+ "model.layers.50.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
428
+ "model.layers.50.self_attn.o_proj.weight": "model-00009-of-00009.safetensors",
429
+ "model.layers.50.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
430
+ "model.layers.50.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
431
+ "model.layers.51.input_layernorm.weight": "model-00009-of-00009.safetensors",
432
+ "model.layers.51.mlp.down_proj.weight": "model-00009-of-00009.safetensors",
433
+ "model.layers.51.mlp.gate_proj.weight": "model-00009-of-00009.safetensors",
434
+ "model.layers.51.mlp.up_proj.weight": "model-00009-of-00009.safetensors",
435
+ "model.layers.51.post_attention_layernorm.weight": "model-00009-of-00009.safetensors",
436
+ "model.layers.51.self_attn.k_proj.weight": "model-00009-of-00009.safetensors",
437
+ "model.layers.51.self_attn.o_proj.weight": "model-00009-of-00009.safetensors",
438
+ "model.layers.51.self_attn.q_proj.weight": "model-00009-of-00009.safetensors",
439
+ "model.layers.51.self_attn.v_proj.weight": "model-00009-of-00009.safetensors",
440
+ "model.layers.52.input_layernorm.weight": "model-00009-of-00009.safetensors",
441
+ "model.layers.52.mlp.down_proj.weight": "model-00009-of-00009.safetensors",
442
+ "model.layers.52.mlp.gate_proj.weight": "model-00009-of-00009.safetensors",
443
+ "model.layers.52.mlp.up_proj.weight": "model-00009-of-00009.safetensors",
444
+ "model.layers.52.post_attention_layernorm.weight": "model-00009-of-00009.safetensors",
445
+ "model.layers.52.self_attn.k_proj.weight": "model-00009-of-00009.safetensors",
446
+ "model.layers.52.self_attn.o_proj.weight": "model-00009-of-00009.safetensors",
447
+ "model.layers.52.self_attn.q_proj.weight": "model-00009-of-00009.safetensors",
448
+ "model.layers.52.self_attn.v_proj.weight": "model-00009-of-00009.safetensors",
449
+ "model.layers.53.input_layernorm.weight": "model-00009-of-00009.safetensors",
450
+ "model.layers.53.mlp.down_proj.weight": "model-00009-of-00009.safetensors",
451
+ "model.layers.53.mlp.gate_proj.weight": "model-00009-of-00009.safetensors",
452
+ "model.layers.53.mlp.up_proj.weight": "model-00009-of-00009.safetensors",
453
+ "model.layers.53.post_attention_layernorm.weight": "model-00009-of-00009.safetensors",
454
+ "model.layers.53.self_attn.k_proj.weight": "model-00009-of-00009.safetensors",
455
+ "model.layers.53.self_attn.o_proj.weight": "model-00009-of-00009.safetensors",
456
+ "model.layers.53.self_attn.q_proj.weight": "model-00009-of-00009.safetensors",
457
+ "model.layers.53.self_attn.v_proj.weight": "model-00009-of-00009.safetensors",
458
+ "model.layers.54.input_layernorm.weight": "model-00009-of-00009.safetensors",
459
+ "model.layers.54.mlp.down_proj.weight": "model-00009-of-00009.safetensors",
460
+ "model.layers.54.mlp.gate_proj.weight": "model-00009-of-00009.safetensors",
461
+ "model.layers.54.mlp.up_proj.weight": "model-00009-of-00009.safetensors",
462
+ "model.layers.54.post_attention_layernorm.weight": "model-00009-of-00009.safetensors",
463
+ "model.layers.54.self_attn.k_proj.weight": "model-00009-of-00009.safetensors",
464
+ "model.layers.54.self_attn.o_proj.weight": "model-00009-of-00009.safetensors",
465
+ "model.layers.54.self_attn.q_proj.weight": "model-00009-of-00009.safetensors",
466
+ "model.layers.54.self_attn.v_proj.weight": "model-00009-of-00009.safetensors",
467
+ "model.layers.55.input_layernorm.weight": "model-00009-of-00009.safetensors",
468
+ "model.layers.55.mlp.down_proj.weight": "model-00009-of-00009.safetensors",
469
+ "model.layers.55.mlp.gate_proj.weight": "model-00009-of-00009.safetensors",
470
+ "model.layers.55.mlp.up_proj.weight": "model-00009-of-00009.safetensors",
471
+ "model.layers.55.post_attention_layernorm.weight": "model-00009-of-00009.safetensors",
472
+ "model.layers.55.self_attn.k_proj.weight": "model-00009-of-00009.safetensors",
473
+ "model.layers.55.self_attn.o_proj.weight": "model-00009-of-00009.safetensors",
474
+ "model.layers.55.self_attn.q_proj.weight": "model-00009-of-00009.safetensors",
475
+ "model.layers.55.self_attn.v_proj.weight": "model-00009-of-00009.safetensors",
476
+ "model.layers.6.input_layernorm.weight": "model-00002-of-00009.safetensors",
477
+ "model.layers.6.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
478
+ "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00009.safetensors",
479
+ "model.layers.6.mlp.up_proj.weight": "model-00002-of-00009.safetensors",
480
+ "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
481
+ "model.layers.6.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
482
+ "model.layers.6.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
483
+ "model.layers.6.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
484
+ "model.layers.6.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
485
+ "model.layers.7.input_layernorm.weight": "model-00002-of-00009.safetensors",
486
+ "model.layers.7.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
487
+ "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00009.safetensors",
488
+ "model.layers.7.mlp.up_proj.weight": "model-00002-of-00009.safetensors",
489
+ "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
490
+ "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
491
+ "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
492
+ "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
493
+ "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
494
+ "model.layers.8.input_layernorm.weight": "model-00002-of-00009.safetensors",
495
+ "model.layers.8.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
496
+ "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00009.safetensors",
497
+ "model.layers.8.mlp.up_proj.weight": "model-00002-of-00009.safetensors",
498
+ "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
499
+ "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
500
+ "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
501
+ "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
502
+ "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
503
+ "model.layers.9.input_layernorm.weight": "model-00002-of-00009.safetensors",
504
+ "model.layers.9.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
505
+ "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00009.safetensors",
506
+ "model.layers.9.mlp.up_proj.weight": "model-00002-of-00009.safetensors",
507
+ "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
508
+ "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
509
+ "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
510
+ "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
511
+ "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
512
+ "model.norm.weight": "model-00009-of-00009.safetensors"
513
+ }
514
+ }
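The weight map above is what `transformers` uses to decide which of the nine shards to open for each tensor. A small sketch of inspecting the index directly, assuming the repository has already been downloaded locally (e.g. with `snapshot_download`):

```py
import json
from collections import Counter
from pathlib import Path

index = json.loads(Path("model.safetensors.index.json").read_text())  # inside the local repo folder

print("total_size (bytes):", index["metadata"]["total_size"])
print("tensors per shard:", Counter(index["weight_map"].values()))

# Which shard holds a given tensor, e.g. the output embedding:
print(index["weight_map"]["lm_head.weight"])  # -> model-00009-of-00009.safetensors
```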
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "unk_token": {
17
+ "content": "<unk>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ }
23
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9addc8bdce5988448ae81b729336f43a81262160ae8da760674badab9d4c7d33
3
+ size 587591
tokenizer_config.json ADDED
@@ -0,0 +1,90 @@
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "add_prefix_space": true,
5
+ "added_tokens_decoder": {
6
+ "0": {
7
+ "content": "<unk>",
8
+ "lstrip": false,
9
+ "normalized": false,
10
+ "rstrip": false,
11
+ "single_word": false,
12
+ "special": true
13
+ },
14
+ "1": {
15
+ "content": "<s>",
16
+ "lstrip": false,
17
+ "normalized": false,
18
+ "rstrip": false,
19
+ "single_word": false,
20
+ "special": true
21
+ },
22
+ "2": {
23
+ "content": "</s>",
24
+ "lstrip": false,
25
+ "normalized": false,
26
+ "rstrip": false,
27
+ "single_word": false,
28
+ "special": true
29
+ },
30
+ "3": {
31
+ "content": "[INST]",
32
+ "lstrip": false,
33
+ "normalized": true,
34
+ "rstrip": false,
35
+ "single_word": false,
36
+ "special": false
37
+ },
38
+ "4": {
39
+ "content": "[/INST]",
40
+ "lstrip": false,
41
+ "normalized": true,
42
+ "rstrip": false,
43
+ "single_word": false,
44
+ "special": false
45
+ },
46
+ "10": {
47
+ "content": "[IMG]",
48
+ "lstrip": false,
49
+ "normalized": true,
50
+ "rstrip": false,
51
+ "single_word": false,
52
+ "special": false
53
+ },
54
+ "11": {
55
+ "content": "[PREFIX]",
56
+ "lstrip": false,
57
+ "normalized": true,
58
+ "rstrip": false,
59
+ "single_word": false,
60
+ "special": false
61
+ },
62
+ "12": {
63
+ "content": "[MIDDLE]",
64
+ "lstrip": false,
65
+ "normalized": true,
66
+ "rstrip": false,
67
+ "single_word": false,
68
+ "special": false
69
+ },
70
+ "13": {
71
+ "content": "[SUFFIX]",
72
+ "lstrip": false,
73
+ "normalized": true,
74
+ "rstrip": false,
75
+ "single_word": false,
76
+ "special": false
77
+ }
78
+ },
79
+ "bos_token": "<s>",
80
+ "clean_up_tokenization_spaces": false,
81
+ "eos_token": "</s>",
82
+ "legacy": true,
83
+ "model_max_length": 1000000000000000019884624838656,
84
+ "pad_token": null,
85
+ "sp_model_kwargs": {},
86
+ "spaces_between_special_tokens": false,
87
+ "tokenizer_class": "LlamaTokenizer",
88
+ "unk_token": "<unk>",
89
+ "use_default_system_prompt": false
90
+ }