patrickvonplaten committed on
Commit c009356
1 Parent(s): 642ae4b
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<BOS>": 50258,
+   "<EOS>": 50259,
+   "<PAD>": 50257
+ }
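For reference, these three ids sit directly above GPT-2's original 50257-entry vocabulary (ids 0-50256). Below is a minimal sketch of how such a mapping is typically produced with the transformers library; the base checkpoint name is an assumption, not taken from this repo:

from transformers import GPT2Tokenizer

# Assumed base checkpoint; the actual starting point of this repo may differ.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

# Register the extra tokens. With an insertion-ordered dict the new ids are
# handed out sequentially after the base vocabulary, matching added_tokens.json.
tokenizer.add_special_tokens({
    "pad_token": "<PAD>",  # expected id 50257
    "bos_token": "<BOS>",  # expected id 50258
    "eos_token": "<EOS>",  # expected id 50259
})

print(tokenizer.convert_tokens_to_ids(["<PAD>", "<BOS>", "<EOS>"]))
# [50257, 50258, 50259] under these assumptions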
config.json CHANGED
@@ -5,9 +5,10 @@
   "GPT2OptimusForLatentConnector"
  ],
  "attn_pdrop": 0.1,
- "bos_token_id": 50256,
+ "bos_token_id": 50258,
  "embd_pdrop": 0.1,
- "eos_token_id": 50256,
+ "eos_token_id": 50259,
+ "pad_token_id": 50257,
  "id2label": {
    "0": "LABEL_0"
  },
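The new config values mirror the tokenizer ids introduced above. A quick consistency check, sketched under the assumption that the repo is cloned to a placeholder local path:

from transformers import AutoConfig, AutoTokenizer

local_dir = "./checkpoint"  # placeholder path, not part of this commit
config = AutoConfig.from_pretrained(local_dir)
tokenizer = AutoTokenizer.from_pretrained(local_dir)

# After this change, config and tokenizer should agree on all three ids.
assert config.bos_token_id == tokenizer.bos_token_id == 50258
assert config.eos_token_id == tokenizer.eos_token_id == 50259
assert config.pad_token_id == tokenizer.pad_token_id == 50257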
special_tokens_map.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "bos_token": "<BOS>",
+   "eos_token": "<EOS>",
+   "pad_token": "<PAD>",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
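Once the files above are in place, a loaded tokenizer should report this map directly; a small sketch with a placeholder local path:

from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("./checkpoint")  # placeholder path
print(tokenizer.special_tokens_map)
# Expected under these assumptions:
# {'bos_token': '<BOS>', 'eos_token': '<EOS>', 'unk_token': '<|endoftext|>', 'pad_token': '<PAD>'}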
tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "name_or_path": "./",
+   "pad_token": null,
+   "special_tokens_map_file": null,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
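Note that this file still lists <|endoftext|> for bos_token and eos_token and a null pad_token; assuming the usual transformers loading order, the entries from special_tokens_map.json and added_tokens.json are applied afterwards, so the <BOS>/<EOS>/<PAD> definitions above take effect. The tokenizer files touched by this commit are the ones save_pretrained emits once the tokens are registered, sketched here with a placeholder path:

from transformers import GPT2Tokenizer

# Placeholder path; in practice this is wherever the updated tokenizer lives.
tokenizer = GPT2Tokenizer.from_pretrained("./checkpoint")

# save_pretrained re-emits tokenizer_config.json, special_tokens_map.json,
# added_tokens.json, vocab.json and merges.txt into the target directory.
tokenizer.save_pretrained("./checkpoint")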
vocab.json CHANGED
The diff for this file is too large to render. See raw diff