codebyzeb committed
Commit
3260a67
1 Parent(s): 792f6de

Upload tokenizer

Files changed (3)
  1. tokenizer.json +82 -38
  2. tokenizer_config.json +1 -1
  3. vocab.json +1 -1
tokenizer.json CHANGED
@@ -22,7 +22,7 @@
       "special": true
     },
     {
-      "id": 5,
+      "id": 3,
       "content": "UTT_BOUNDARY",
       "single_word": false,
       "lstrip": false,
@@ -34,13 +34,6 @@
   "normalizer": {
     "type": "Sequence",
     "normalizers": [
-      {
-        "type": "Replace",
-        "pattern": {
-          "String": "\n"
-        },
-        "content": " UTT_BOUNDARY"
-      },
       {
         "type": "Strip",
         "strip_left": true,
@@ -51,42 +44,93 @@
   "pre_tokenizer": {
     "type": "Whitespace"
   },
-  "post_processor": null,
+  "post_processor": {
+    "type": "TemplateProcessing",
+    "single": [
+      {
+        "SpecialToken": {
+          "id": "UTT_BOUNDARY",
+          "type_id": 0
+        }
+      },
+      {
+        "Sequence": {
+          "id": "A",
+          "type_id": 0
+        }
+      }
+    ],
+    "pair": [
+      {
+        "SpecialToken": {
+          "id": "UTT_BOUNDARY",
+          "type_id": 0
+        }
+      },
+      {
+        "Sequence": {
+          "id": "A",
+          "type_id": 0
+        }
+      },
+      {
+        "SpecialToken": {
+          "id": "UTT_BOUNDARY",
+          "type_id": 0
+        }
+      },
+      {
+        "Sequence": {
+          "id": "B",
+          "type_id": 1
+        }
+      }
+    ],
+    "special_tokens": {
+      "UTT_BOUNDARY": {
+        "id": "UTT_BOUNDARY",
+        "ids": [
+          3
+        ],
+        "tokens": [
+          "UTT_BOUNDARY"
+        ]
+      }
+    }
+  },
   "decoder": null,
   "model": {
     "type": "WordLevel",
     "vocab": {
       "UNK": 0,
       "PAD": 1,
-      "BOS": 2,
-      "EOS": 3,
-      "WORD_BOUNDARY": 4,
-      "UTT_BOUNDARY": 5,
-      "k": 6,
-      "s": 7,
-      "o": 8,
-      "b": 9,
-      "a": 10,
-      "h": 11,
-      "n": 12,
-      "t̠ʃ": 13,
-      "i": 14,
-      "j": 15,
-      "d": 16,
-      "e": 17,
-      "ʃ": 18,
-      "u": 19,
-      "ɡ": 20,
-      "r": 21,
-      "f": 22,
-      "t": 23,
-      "m": 24,
-      "d̠ʒ": 25,
-      "l": 26,
-      "q": 27,
-      "v": 28,
-      "z": 29,
-      "p": 30
+      "WORD_BOUNDARY": 2,
+      "UTT_BOUNDARY": 3,
+      "k": 4,
+      "s": 5,
+      "o": 6,
+      "b": 7,
+      "a": 8,
+      "h": 9,
+      "n": 10,
+      "t̠ʃ": 11,
+      "i": 12,
+      "j": 13,
+      "d": 14,
+      "e": 15,
+      "ʃ": 16,
+      "u": 17,
+      "ɡ": 18,
+      "r": 19,
+      "f": 20,
+      "t": 21,
+      "m": 22,
+      "d̠ʒ": 23,
+      "l": 24,
+      "q": 25,
+      "v": 26,
+      "z": 27,
+      "p": 28
     },
     "unk_token": "UNK"
   }
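Net effect of the tokenizer.json changes: the Replace normalizer that turned "\n" into " UTT_BOUNDARY" is gone, a TemplateProcessing post-processor now prepends UTT_BOUNDARY (id 3) to every encoded sequence, and BOS/EOS are dropped so all remaining ids shift down by two. Below is a minimal sketch of the resulting behaviour, assuming the `tokenizers` library and the updated tokenizer.json from this commit; the input string is purely illustrative.

```python
# Minimal sketch (not part of this commit): load the updated tokenizer.json
# and check what the new TemplateProcessing post-processor does to a sequence.
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")  # assumed path: the file from this repo

# Illustrative space-separated phoneme string; Whitespace pre-tokenization
# splits it and the WordLevel model maps each phoneme to its renumbered id.
enc = tok.encode("k a t")
print(enc.tokens)  # ['UTT_BOUNDARY', 'k', 'a', 't']  <- UTT_BOUNDARY prepended
print(enc.ids)     # [3, 4, 8, 21]                    <- ids after BOS/EOS removal
```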
tokenizer_config.json CHANGED
@@ -17,7 +17,7 @@
       "single_word": false,
       "special": true
     },
-    "5": {
+    "3": {
       "content": "UTT_BOUNDARY",
       "lstrip": false,
       "normalized": false,
vocab.json CHANGED
@@ -1 +1 @@
-{"UNK":0,"PAD":1,"BOS":2,"EOS":3,"WORD_BOUNDARY":4,"UTT_BOUNDARY":5,"k":6,"s":7,"o":8,"b":9,"a":10,"h":11,"n":12,"t̠ʃ":13,"i":14,"j":15,"d":16,"e":17,"ʃ":18,"u":19,"ɡ":20,"r":21,"f":22,"t":23,"m":24,"d̠ʒ":25,"l":26,"q":27,"v":28,"z":29,"p":30}
+{"UNK":0,"PAD":1,"WORD_BOUNDARY":2,"UTT_BOUNDARY":3,"k":4,"s":5,"o":6,"b":7,"a":8,"h":9,"n":10,"t̠ʃ":11,"i":12,"j":13,"d":14,"e":15,"ʃ":16,"u":17,"ɡ":18,"r":19,"f":20,"t":21,"m":22,"d̠ʒ":23,"l":24,"q":25,"v":26,"z":27,"p":28}