alexkueck committed on
Commit 1769a7e
1 Parent(s): f89e103

Upload tokenizer

added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "<|pad|>": 50258,
+   "<|startoftext|>": 50257
+ }
special_tokens_map.json CHANGED
@@ -1,5 +1,24 @@
  {
- "bos_token": "<|endoftext|>",
- "eos_token": "<|endoftext|>",
+ "bos_token": {
+   "content": "<|startoftext|>",
+   "lstrip": false,
+   "normalized": true,
+   "rstrip": false,
+   "single_word": false
+ },
+ "eos_token": {
+   "content": "<|endoftext|>",
+   "lstrip": false,
+   "normalized": true,
+   "rstrip": false,
+   "single_word": false
+ },
+ "pad_token": {
+   "content": "<|pad|>",
+   "lstrip": false,
+   "normalized": true,
+   "rstrip": false,
+   "single_word": false
+ },
  "unk_token": "<|endoftext|>"
  }
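The special-token layout above is what the transformers library writes out when new tokens are registered on a GPT-2 tokenizer and the tokenizer is saved again. A minimal sketch of how a change like this is typically produced; the base checkpoint name and the output directory below are assumptions, not taken from this commit:

from transformers import AutoTokenizer

# Placeholder base checkpoint; the actual base model of this repo is not
# stated in the commit (it only needs to use the GPT-2 tokenizer).
tokenizer = AutoTokenizer.from_pretrained("gpt2")

# <|endoftext|> (id 50256) is kept as eos/unk token; <|startoftext|> and
# <|pad|> are appended to the vocabulary, which is why they receive the
# ids 50257 and 50258 seen in added_tokens.json.
tokenizer.add_special_tokens({
    "bos_token": "<|startoftext|>",
    "pad_token": "<|pad|>",
})

# Re-saves the tokenizer; depending on the transformers version this writes
# added_tokens.json, special_tokens_map.json, tokenizer_config.json and
# tokenizer.json, as in this commit.
tokenizer.save_pretrained("./tokenizer")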
tokenizer.json CHANGED
@@ -11,6 +11,24 @@
      "rstrip": false,
      "normalized": false,
      "special": true
+   },
+   {
+     "id": 50257,
+     "content": "<|startoftext|>",
+     "single_word": false,
+     "lstrip": false,
+     "rstrip": false,
+     "normalized": true,
+     "special": true
+   },
+   {
+     "id": 50258,
+     "content": "<|pad|>",
+     "single_word": false,
+     "lstrip": false,
+     "rstrip": false,
+     "normalized": true,
+     "special": true
    }
  ],
  "normalizer": null,
tokenizer_config.json CHANGED
@@ -3,7 +3,7 @@
    "add_prefix_space": false,
    "bos_token": {
      "__type": "AddedToken",
-     "content": "<|endoftext|>",
+     "content": "<|startoftext|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
@@ -20,7 +20,14 @@
    },
    "errors": "replace",
    "model_max_length": 2048,
-   "pad_token": null,
+   "pad_token": {
+     "__type": "AddedToken",
+     "content": "<|pad|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
    "tokenizer_class": "GPT2Tokenizer",
    "unk_token": {
      "__type": "AddedToken",