Training in progress, step 200
- .gitattributes +1 -0
- .gitignore +1 -0
- config.json +27 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +17 -0
- tokenizer.json +3 -0
- tokenizer_config.json +21 -0
- training_args.bin +3 -0
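This is the automatic checkpoint commit that the transformers Trainer pushes when hub upload is enabled: at each save step it uploads the weights, tokenizer files, and training arguments listed above. Below is a minimal sketch of the kind of setup that produces such a commit; the output directory name is a made-up assumption, and only the push/save settings correspond to what is visible here.

```python
# Minimal sketch of a Trainer setup that yields "Training in progress,
# step N" commits. output_dir is a hypothetical repo name; only
# push_to_hub/save_steps correspond to what this commit shows.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="xglm-564M-chatbox",  # hypothetical local dir / Hub repo name
    push_to_hub=True,                # upload each checkpoint to the Hub
    save_strategy="steps",
    save_steps=200,                  # hence "Training in progress, step 200"
)
# Trainer(model=..., args=args, train_dataset=...).train() then pushes the
# files listed above (weights, tokenizer files, training_args.bin) at step 200.
```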
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
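The single added line registers tokenizer.json with Git LFS, so the repo stores only a small pointer while the ~17 MB blob lives out of band; that is why the tokenizer.json diff further down is just three lines. A rough sketch of how these patterns select files (real .gitattributes matching is gitignore-style; fnmatch is only an approximation that suffices for patterns this simple):

```python
# Sketch: which repo paths the filter=lfs rules above capture.
from fnmatch import fnmatch

lfs_patterns = ["*.zip", "*.zst", "*tfevents*", "tokenizer.json"]

for path in ["tokenizer.json", "config.json", "events.out.tfevents.123"]:
    tracked = any(fnmatch(path, p) for p in lfs_patterns)
    print(f"{path}: {'LFS pointer' if tracked else 'stored in git'}")
```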
.gitignore
ADDED
@@ -0,0 +1 @@
+checkpoint-*/
config.json
ADDED
@@ -0,0 +1,27 @@
+{
+  "_name_or_path": "antphb/DS-Chatbox-facebook-xglm-564M-V3",
+  "activation_dropout": 0,
+  "activation_function": "gelu",
+  "architectures": [
+    "XGLMForCausalLM"
+  ],
+  "attention_dropout": 0.1,
+  "attention_heads": 16,
+  "bos_token_id": 0,
+  "d_model": 1024,
+  "decoder_start_token_id": 2,
+  "dropout": 0.1,
+  "eos_token_id": 2,
+  "ffn_dim": 4096,
+  "init_std": 0.02,
+  "layerdrop": 0.0,
+  "max_position_embeddings": 2048,
+  "model_type": "xglm",
+  "num_layers": 24,
+  "pad_token_id": 1,
+  "scale_embedding": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.30.2",
+  "use_cache": false,
+  "vocab_size": 256008
+}
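The config describes a 24-layer XGLM decoder. For reference, these hyperparameters can be fed back into transformers to rebuild the architecture and confirm the roughly 564M parameter count implied by the source model's name; this is a sketch with randomly initialised weights, not the checkpoint itself:

```python
# Sketch: reconstruct the architecture from the config values above.
from transformers import XGLMConfig, XGLMForCausalLM

config = XGLMConfig(
    d_model=1024,
    ffn_dim=4096,
    num_layers=24,
    attention_heads=16,
    max_position_embeddings=2048,
    vocab_size=256008,
    dropout=0.1,
    attention_dropout=0.1,
    activation_function="gelu",
    scale_embedding=True,
    use_cache=False,
)
model = XGLMForCausalLM(config)  # random init, not the trained weights
print(sum(p.numel() for p in model.parameters()))  # ~564M parameters
```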
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae98e83cd805dd82896e7689b21dcb09b646bde2af0d69c14076340810e42237
+size 2257982617
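pytorch_model.bin is stored through Git LFS, so the repo holds only this three-line pointer while the 2.26 GB blob lives in LFS storage. A small sketch parsing the v1 pointer format and sanity-checking the size field against the config above (float32 weights are 4 bytes each):

```python
# Sketch: parse the LFS v1 pointer and sanity-check its size field.
def parse_lfs_pointer(text: str) -> dict[str, str]:
    # Each pointer line is "key value"; split on the first space only.
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

ptr = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:ae98e83cd805dd82896e7689b21dcb09b646bde2af0d69c14076340810e42237\n"
    "size 2257982617\n"
)
# float32 = 4 bytes per weight, so size/4 approximates the parameter count.
print(int(ptr["size"]) / 4 / 1e6)  # ≈ 564.5M, matching the 564M model
```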
special_tokens_map.json
ADDED
@@ -0,0 +1,17 @@
+{
+  "additional_special_tokens": [
+    "<madeupword0>",
+    "<madeupword1>",
+    "<madeupword2>",
+    "<madeupword3>",
+    "<madeupword4>",
+    "<madeupword5>",
+    "<madeupword6>"
+  ],
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "pad_token": "</s>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00d163342a36b3ad1ea2f5f608e6bb2b2ff29bd453a41c4f52525a7ebc7c4b6a
+size 17210041
tokenizer_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "additional_special_tokens": [
+    "<madeupword0>",
+    "<madeupword1>",
+    "<madeupword2>",
+    "<madeupword3>",
+    "<madeupword4>",
+    "<madeupword5>",
+    "<madeupword6>"
+  ],
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "model_max_length": 2048,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "XGLMTokenizer",
+  "unk_token": "<unk>"
+}
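The two tokenizer files overlap, and they disagree on one key: special_tokens_map.json sets pad_token to "</s>" while tokenizer_config.json sets it to "<pad>". The effective values are whatever the tokenizer loader resolves; a quick inspection sketch (the repo id is taken from "_name_or_path" in config.json and is an assumption about where this commit lives):

```python
# Sketch: inspect the special tokens the loaded tokenizer actually resolves.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("antphb/DS-Chatbox-facebook-xglm-564M-V3")
print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)
print(tok.additional_special_tokens)  # the seven <madeupwordN> placeholders
print(tok.model_max_length)           # 2048, matching max_position_embeddings
```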
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:439b7946d0af1594330e4354ee4ae3530100b57bbe96d67842ff9507539649f3
+size 3963
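training_args.bin is a pickled TrainingArguments object, stored via LFS under the repo's existing rules. It can be inspected from a local clone; a sketch, assuming transformers is installed so the class can be unpickled:

```python
# Sketch: load and inspect the pickled TrainingArguments from a local clone.
# Recent torch versions default to weights_only=True, so unpickling an
# arbitrary object must be opted into explicitly.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)             # TrainingArguments
print(args.save_steps, args.push_to_hub)
```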