Upload tokenizer

tokenizer.json (+2 -4)
tokenizer_config.json (+1 -1)
tokenizer.json
CHANGED
@@ -53,8 +53,7 @@
   "pre_tokenizer": {
     "type": "ByteLevel",
     "add_prefix_space": false,
-    "trim_offsets": true,
-    "use_regex": true
+    "trim_offsets": true
   },
   "post_processor": {
     "type": "RobertaProcessing",
@@ -72,8 +71,7 @@
   "decoder": {
     "type": "ByteLevel",
     "add_prefix_space": true,
-    "trim_offsets": true,
-    "use_regex": true
+    "trim_offsets": true
   },
   "model": {
     "type": "BPE",
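Net effect of the tokenizer.json change: the use_regex flag is dropped from both the ByteLevel pre-tokenizer and the ByteLevel decoder, while trim_offsets is kept. Files written by older releases of the tokenizers library omit this field, so the trimmed file matches their serialization format. A minimal sketch for sanity-checking the updated file, assuming tokenizer.json sits in the working directory:

from tokenizers import Tokenizer

# Load the updated file and confirm both ByteLevel components
# deserialize cleanly without the removed "use_regex" field.
tok = Tokenizer.from_file("tokenizer.json")
print(tok.pre_tokenizer)  # ByteLevel (add_prefix_space=False)
print(tok.decoder)        # ByteLevel (add_prefix_space=True)

# Round-trip a sample string to check encode/decode still agree.
ids = tok.encode("Hello world").ids
print(tok.decode(ids))  # -> "Hello world"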
tokenizer_config.json
CHANGED
@@ -6,7 +6,7 @@
   "errors": "replace",
   "mask_token": "<mask>",
   "model_max_length": 1024,
-  "name_or_path": "saved_models/bart-large-cnn-wikipedia-paras-yake-importance-
+  "name_or_path": "../saved_models/bart-large-cnn-wikipedia-paras-yake-importance-10000d-final",
   "pad_token": "<pad>",
   "sep_token": "</s>",
   "special_tokens_map_file": null,
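The tokenizer_config.json change only updates name_or_path, which records the directory the tokenizer was saved from; transformers treats it as metadata, and loading is driven by whatever path or repo id you pass. A minimal sketch, with a placeholder path standing in for wherever these files actually live:

from transformers import AutoTokenizer

# "path/to/this/repo" is a placeholder: pass the local directory or
# Hub repo id that contains these tokenizer files.
tok = AutoTokenizer.from_pretrained("path/to/this/repo")
print(tok.model_max_length)  # 1024, per tokenizer_config.json
print(tok.mask_token)        # <mask>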