oyxy2019 committed on
Commit 1996cb7
1 parent: c7884a6

Upload tokenizer

added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "<|beginoftext|>": 21134,
+   "<|endoftext|>": 21133
+ }
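added_tokens.json appends two tokens past the base vocabulary, so `<|endoftext|>` and `<|beginoftext|>` resolve to IDs 21133 and 21134. A minimal sketch of checking that round-trip after loading (the repo ID below is a placeholder; the commit page does not name the repository):

```python
from transformers import AutoTokenizer

# Placeholder repo ID -- the commit page does not name the repository.
tok = AutoTokenizer.from_pretrained("oyxy2019/tokenizer-upload")

# Entries from added_tokens.json should round-trip to their listed IDs.
assert tok.convert_tokens_to_ids("<|endoftext|>") == 21133
assert tok.convert_tokens_to_ids("<|beginoftext|>") == 21134
print(tok.convert_ids_to_tokens([21133, 21134]))
# ['<|endoftext|>', '<|beginoftext|>']
```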
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "<|beginoftext|>",
+   "mask_token": "[MASK]",
+   "pad_token": "<|endoftext|>",
+   "sep_token": "<|endoftext|>",
+   "unk_token": "[UNK]"
+ }
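special_tokens_map.json tells transformers which strings fill each special-token role; here GPT-style markers stand in for BERT's usual `[CLS]` and `[SEP]`. A sketch of how those roles surface on the loaded tokenizer (same placeholder repo ID as above):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("oyxy2019/tokenizer-upload")  # placeholder repo ID

# Role attributes are populated from special_tokens_map.json.
print(tok.cls_token, tok.sep_token)   # <|beginoftext|> <|endoftext|>
print(tok.mask_token, tok.unk_token)  # [MASK] [UNK]

# BertTokenizer wraps a single sequence as cls + tokens + sep,
# so encodings start and end with the GPT-style markers.
ids = tok("hello world")["input_ids"]
print(tok.convert_ids_to_tokens(ids))
# ['<|beginoftext|>', 'hello', 'world', '<|endoftext|>']
```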
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "add_prefix_space": false,
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<|beginoftext|>",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 1024,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "<|endoftext|>",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
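tokenizer_config.json records the constructor arguments, so the tokenizer can also be rebuilt directly from vocab.txt with `BertTokenizer`. Note that `pad_token` is `"[PAD]"` here but `"<|endoftext|>"` in special_tokens_map.json; which value wins at load time can vary by transformers version. A sketch assuming the uploaded files sit in the working directory:

```python
from transformers import BertTokenizer

# Rebuild the tokenizer from the uploaded files (paths are assumptions
# about where the repo was cloned) using the recorded arguments.
tok = BertTokenizer(
    vocab_file="vocab.txt",
    do_lower_case=True,
    do_basic_tokenize=True,
    never_split=None,
    tokenize_chinese_chars=True,
    strip_accents=None,
    model_max_length=1024,
    cls_token="<|beginoftext|>",
    sep_token="<|endoftext|>",
    unk_token="[UNK]",
    mask_token="[MASK]",
    pad_token="[PAD]",  # value from this file; special_tokens_map.json says "<|endoftext|>"
)
print(tok.model_max_length)  # 1024
```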
vocab.txt ADDED
The diff for this file is too large to render.