Files changed (4)
  1. special_tokens_map.json +4 -11
  2. tokenizer.json +0 -0
  3. tokenizer_config.json +11 -32
  4. vocab.txt +0 -0
special_tokens_map.json CHANGED
@@ -1,14 +1,7 @@
 {
-  "bos_token": "<s>",
-  "cls_token": "</s>",
-  "eos_token": "</s>",
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
   "pad_token": "[PAD]",
-  "sep_token": "</s>",
-  "unk_token": {
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  }
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
 }
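
Taken together, this swaps the SentencePiece-style special-token map for BERT-style tokens. A minimal sanity check, assuming the repo is checked out locally (the ./my-model path is illustrative, not part of this diff):

from transformers import AutoTokenizer

# Assumes this repo is cloned locally at ./my-model (illustrative path).
tok = AutoTokenizer.from_pretrained("./my-model")

# With the new map, the BERT-style specials replace the old <s>/</s> pair.
print(tok.cls_token, tok.sep_token, tok.mask_token)  # [CLS] [SEP] [MASK]
print(tok.pad_token, tok.unk_token)                  # [PAD] [UNK]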
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
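
tokenizer.json is the serialized fast-tokenizer state, which is why the rendered diff is suppressed. A sketch of loading it directly with the tokenizers library, assuming a local copy of the file:

from tokenizers import Tokenizer

# "tokenizer.json" refers to a local copy of the file added in this diff.
tok = Tokenizer.from_file("tokenizer.json")
enc = tok.encode("Hello world")
# Typically ['[CLS]', 'hello', 'world', '[SEP]'] if the file defines a
# BERT-style post-processor; exact pieces depend on the vocabulary.
print(enc.tokens)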
 
tokenizer_config.json CHANGED
@@ -1,34 +1,13 @@
 {
-  "add_bos_token": true,
-  "add_eos_token": false,
-  "add_prefix_space": false,
-  "bos_token": {
-    "__type": "AddedToken",
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "clean_up_tokenization_spaces": false,
-  "eos_token": {
-    "__type": "AddedToken",
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "model_max_length": 1000000000000000019884624838656,
-  "pad_token": null,
-  "sp_model_kwargs": {},
-  "tokenizer_class": "LlamaTokenizer",
-  "unk_token": {
-    "__type": "AddedToken",
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  }
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
 }
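
The new config points transformers at BertTokenizer with a 512-token limit, replacing the LlamaTokenizer settings and the effectively unbounded model_max_length. A sketch of what this implies at load time (local ./my-model path assumed, as above):

from transformers import AutoTokenizer

# Illustrative local path; not part of this diff.
tok = AutoTokenizer.from_pretrained("./my-model")
print(type(tok).__name__)    # BertTokenizerFast when tokenizer.json is present
print(tok.model_max_length)  # 512

# do_lower_case: true means input is lowercased before WordPiece splitting,
# and truncation now caps sequences at 512 tokens.
ids = tok("Hello World", truncation=True)["input_ids"]
print(tok.convert_ids_to_tokens(ids))  # e.g. ['[CLS]', 'hello', 'world', '[SEP]']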
vocab.txt ADDED
The diff for this file is too large to render. See raw diff
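
vocab.txt is the WordPiece vocabulary consumed by BertTokenizer: one token per line, with the line number as the token id. A quick consistency check, assuming a local copy of the file:

# "vocab.txt" refers to a local copy of the file added in this diff.
with open("vocab.txt", encoding="utf-8") as f:
    vocab = [line.rstrip("\n") for line in f]

print(len(vocab))            # vocabulary size
print(vocab.index("[PAD]"))  # id of the padding token, commonly 0 in BERT vocabs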