Sirclavin committed on
Commit
4e11be5
1 Parent(s): 9b3e1bd

Upload 10 files

README.md ADDED
@@ -0,0 +1,70 @@
+ ---
+ license: apache-2.0
+ tags:
+ - autotrain
+ - text-generation
+ base_model: Locutusque/TinyMistral-248M
+ datasets:
+ - OpenAssistant/oasst_top1_2023-08-25
+ widget:
+ - text: |-
+ <|im_start|>user
+ Write the specs of a game about trolls and warriors in a fantasy world.<|im_end|>
+ <|im_start|>assistant
+ The game is an adventure game that takes place on a planet, where players must explore their unique abilities to survive. Players can use different strategies such as collecting items or trading them for gold or silver coins, but they also need to learn how to deal with obstacles and find new ways to escape.<|im_end|>
+ <|im_start|>user
+ Could you tell me something curious about the Earth?<|im_end|>
+ <|im_start|>assistant
+ The planet is a large, rocky world with an atmosphere of 10 billion years old and a surface area around 25 million miles (36 million kilometers) wide.<|im_end|>
+ <|im_start|>user
+ What are some potential applications for quantum computing?<|im_end|>
+ <|im_start|>assistant
+ inference:
+ parameters:
+ max_new_tokens: 64
+ repetition_penalty: 1.18
+ ---
+
+ # Locutusque's TinyMistral-248M trained on OpenAssistant TOP-1 Conversation Threads
+
+ - Base model: [Locutusque/TinyMistral-248M](https://huggingface.co/Locutusque/TinyMistral-248M/blob/90b89d18fdf27937dc04ab8a9b543c5af2991c7f/README.md)
+ - Dataset: [OpenAssistant/oasst_top1_2023-08-25](https://huggingface.co/datasets/OpenAssistant/oasst_top1_2023-08-25)
+
+ ## Recommended Prompt Format
+
+ ```
+ <|im_start|>user
+ {message}<|im_end|>
+ <|im_start|>assistant
+ ```
+
+ ## How it was trained
+
+ ```ipython
+ %pip install autotrain-advanced
+
+ !autotrain setup
+
+ !autotrain llm \
+ --train \
+ --trainer "sft" \
+ --model './TinyMistral-248M/' \
+ --model_max_length 4096 \
+ --block-size 1024 \
+ --project-name 'trained-model' \
+ --data-path "OpenAssistant/oasst_top1_2023-08-25" \
+ --train_split "train" \
+ --valid_split "test" \
+ --text-column "text" \
+ --lr 1e-5 \
+ --train_batch_size 2 \
+ --epochs 5 \
+ --evaluation_strategy "steps" \
+ --save-strategy "steps" \
+ --save-total-limit 2 \
+ --warmup-ratio 0.05 \
+ --weight-decay 0.0 \
+ --gradient-accumulation 8 \
+ --logging-steps 10 \
+ --scheduler "constant"
+ ```
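As a usage note (editor's addition, not one of the uploaded files): a minimal inference sketch showing how the recommended prompt format and the card's inference parameters (`max_new_tokens: 64`, `repetition_penalty: 1.18`) could be used with `transformers`. The model path `./trained-model` is an assumption; substitute this repository's Hub id or a local clone.

```python
# Minimal sketch, assuming the checkpoint is available at "./trained-model"
# (e.g. a local clone of this repository); not part of the uploaded files.
from transformers import pipeline

generate = pipeline("text-generation", model="./trained-model")

# Recommended prompt format from the model card
prompt = (
    "<|im_start|>user\n"
    "Could you tell me something curious about the Earth?<|im_end|>\n"
    "<|im_start|>assistant\n"
)

result = generate(
    prompt,
    max_new_tokens=64,        # matches the card's inference parameters
    repetition_penalty=1.18,  # matches the card's inference parameters
    do_sample=True,
)
print(result[0]["generated_text"])
```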
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "<|bos|>": 32000,
+ "<|endoftext|>": 32001,
+ "[PAD]": 32002
+ }
config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "_name_or_path": "./TinyMistral-248M/",
+ "architectures": [
+ "MistralForCausalLM"
+ ],
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "max_position_embeddings": 32768,
+ "model_type": "mistral",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 12,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_theta": 10000.0,
+ "sliding_window": 32,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.34.1",
+ "use_cache": false,
+ "vocab_size": 32003
+ }
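As a quick cross-check (editor's sketch, not part of the upload), the values in this config reproduce the ~248M parameter count in the model's name: with `num_key_value_heads: 8` the K/V projections are grouped-query sized, and `tie_word_embeddings: false` means the output head is counted separately from the input embeddings.

```python
# Back-of-the-envelope parameter count from the config.json values above.
vocab, hidden, layers, inter = 32003, 1024, 12, 4096
heads, kv_heads = 32, 8
kv_dim = (hidden // heads) * kv_heads             # 256 (grouped-query K/V width)

embeddings = vocab * hidden                       # input embedding table
lm_head = vocab * hidden                          # separate output head (untied)
attn = 2 * hidden * hidden + 2 * hidden * kv_dim  # q/o + k/v projections
mlp = 3 * hidden * inter                          # gate, up, down projections
norms = 2 * hidden                                # two RMSNorms per layer

total = embeddings + lm_head + layers * (attn + mlp + norms) + hidden  # + final norm
print(f"{total:,}")  # ~248 million parameters
```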
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "transformers_version": "4.34.1"
+ }
gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e6a6b3157a6c75fef55eb31e0d8662bff109779e98e56d652c6e38d0f8c7d28
+ size 992092328
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,68 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32000": {
+ "content": "<|bos|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32001": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32002": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": true,
+ "max_length": 1536,
+ "model_max_length": 4096,
+ "pad_token": "[PAD]",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "stride": 0,
+ "tokenizer_class": "LlamaTokenizer",
+ "truncation_side": "right",
+ "truncation_strategy": "longest_first",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": true
+ }
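Finally, a small consistency check (editor's sketch, assuming `./trained-model` is a local clone of this repository): the three entries from `added_tokens.json` extend the base 32000-token SentencePiece vocabulary, which is why `config.json` declares `vocab_size: 32003` and `[PAD]` is available as the padding token.

```python
# Sketch: verify the tokenizer files above line up with the model config.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./trained-model")  # assumed local path

for token in ("<|bos|>", "<|endoftext|>", "[PAD]"):
    print(token, tok.convert_tokens_to_ids(token))      # 32000, 32001, 32002

print(len(tok))        # 32003, matching vocab_size in config.json
print(tok.pad_token)   # [PAD], as declared in special_tokens_map.json
```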