Zeb committed on
Commit
0a61240
1 Parent(s): 898f5aa

Upload model

config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 3,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 3,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": 3072,
+   "n_layer": 12,
+   "n_positions": 256,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.43.3",
+   "use_cache": true,
+   "vocab_size": 115
+ }
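
Note: the config describes a 12-layer, 12-head GPT-2 with a 768-dimensional hidden state, a 256-token context window, and a 115-entry vocabulary. A minimal sketch (not part of this commit) of instantiating that architecture with transformers, assuming config.json has been downloaded locally:

    # Sketch: build the architecture described by config.json (assumes a local copy of the file).
    from transformers import GPT2Config, GPT2LMHeadModel

    config = GPT2Config.from_json_file("config.json")
    model = GPT2LMHeadModel(config)  # randomly initialised; trained weights live in model.safetensors
    print(f"{model.num_parameters() / 1e6:.1f}M parameters")  # roughly 85M for these hyperparameters
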
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b283429a987d18374784eab74ec5e0e1dc3c65e350fdc09b9cfcf5b6e88b7309
+ size 341378688
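
Note: this is a Git LFS pointer, not the weights themselves. The reported size is consistent with the config above: 341,378,688 bytes of float32 weights is roughly 85M parameters, which matches a 12-layer, 768-wide GPT-2 with a 115-entry embedding table. A back-of-the-envelope check (an assumption, not part of the commit):

    # Sanity check: safetensors size vs. parameter count (ignores the small safetensors header).
    size_bytes = 341_378_688
    params = size_bytes / 4                    # float32 -> 4 bytes per parameter
    print(f"~{params / 1e6:.1f}M parameters")  # ~85.3M
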
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "UTT_BOUNDARY",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "UTT_BOUNDARY",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "PAD",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "UNK",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,244 @@
+ {
+   "version": "1.0",
+   "truncation": null,
+   "padding": null,
+   "added_tokens": [
+     {
+       "id": 0,
+       "content": "UNK",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 1,
+       "content": "PAD",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 3,
+       "content": "UTT_BOUNDARY",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": {
+     "type": "Sequence",
+     "normalizers": [
+       {
+         "type": "NFD"
+       },
+       {
+         "type": "Lowercase"
+       },
+       {
+         "type": "Strip",
+         "strip_left": true,
+         "strip_right": true
+       },
+       {
+         "type": "StripAccents"
+       },
+       {
+         "type": "Replace",
+         "pattern": {
+           "String": " "
+         },
+         "content": ""
+       }
+     ]
+   },
+   "pre_tokenizer": {
+     "type": "Split",
+     "pattern": {
+       "String": ""
+     },
+     "behavior": "Isolated",
+     "invert": false
+   },
+   "post_processor": {
+     "type": "TemplateProcessing",
+     "single": [
+       {
+         "SpecialToken": {
+           "id": "UTT_BOUNDARY",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       }
+     ],
+     "pair": [
+       {
+         "SpecialToken": {
+           "id": "UTT_BOUNDARY",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "UTT_BOUNDARY",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "B",
+           "type_id": 1
+         }
+       }
+     ],
+     "special_tokens": {
+       "UTT_BOUNDARY": {
+         "id": "UTT_BOUNDARY",
+         "ids": [
+           3
+         ],
+         "tokens": [
+           "UTT_BOUNDARY"
+         ]
+       }
+     }
+   },
+   "decoder": null,
+   "model": {
+     "type": "WordLevel",
+     "vocab": {
+       "UNK": 0,
+       "PAD": 1,
+       "W": 2,
+       "UTT_BOUNDARY": 3,
+       "y": 4,
+       "e": 5,
+       "a": 6,
+       "h": 7,
+       ".": 8,
+       "c": 9,
+       "o": 10,
+       "m": 11,
+       "p": 12,
+       "u": 13,
+       "n": 14,
+       "d": 15,
+       "'": 16,
+       "s": 17,
+       "t": 18,
+       "i": 19,
+       "g": 20,
+       "l": 21,
+       "k": 22,
+       "x": 23,
+       ",": 24,
+       "r": 25,
+       "w": 26,
+       "v": 27,
+       "f": 28,
+       "b": 29,
+       "j": 30,
+       "?": 31,
+       "-": 32,
+       "q": 33,
+       ";": 34,
+       "2": 35,
+       "‘": 36,
+       "’": 37,
+       "!": 38,
+       "/": 39,
+       "1": 40,
+       ":": 41,
+       "z": 42,
+       "3": 43,
+       "6": 44,
+       "9": 45,
+       "&": 46,
+       "4": 47,
+       "5": 48,
+       "0": 49,
+       "=": 50,
+       "8": 51,
+       "7": 52,
+       "£": 53,
+       "(": 54,
+       ")": 55,
+       "—": 56,
+       "*": 57,
+       "]": 58,
+       "[": 59,
+       "\"": 60,
+       "_": 61,
+       "%": 62,
+       "“": 63,
+       "”": 64,
+       "+": 65,
+       "$": 66,
+       "^": 67,
+       "#": 68,
+       "æ": 69,
+       "ʌ": 70,
+       "ɩ": 71,
+       "ə": 72,
+       "↫": 73,
+       "|": 74,
+       "°": 75,
+       "ø": 76,
+       "~": 77,
+       "⁄": 78,
+       "`": 79,
+       "�": 80,
+       "′": 81,
+       "@": 82,
+       "}": 83,
+       "{": 84,
+       "―": 85,
+       "–": 86,
+       "·": 87,
+       "♪": 88,
+       "¡": 89,
+       "÷": 90,
+       "\\": 91,
+       "¶": 92,
+       "ð": 93,
+       "¿": 94,
+       "­": 95,
+       "♫": 96,
+       "​": 97,
+       "œ": 98,
+       "ł": 99,
+       "¦": 100,
+       "×": 101,
+       "™": 102,
+       "ß": 103,
+       "ˈ": 104,
+       "ı": 105,
+       "đ": 106,
+       "−": 107,
+       "ː": 108,
+       "•": 109,
+       "⟨": 110,
+       "⟩": 111,
+       "ŋ": 112,
+       "ʼ": 113,
+       "\t": 114
+     },
+     "unk_token": "UNK"
+   }
+ }
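
Note: this tokenizer is character-level: a WordLevel model over single characters, with a normalizer chain (NFD, lowercasing, stripping, accent removal, deletion of spaces) and a Split pre-tokenizer on the empty pattern, i.e. one piece per character. The TemplateProcessing post-processor prepends UTT_BOUNDARY to every sequence. A usage sketch (an assumption, not part of the commit) with the tokenizers library; the expected output follows from the pipeline described above:

    # Sketch: load the uploaded tokenizer.json and encode a sample string.
    from tokenizers import Tokenizer

    tok = Tokenizer.from_file("tokenizer.json")
    enc = tok.encode("Hello World")
    print(enc.tokens)
    # Expected, given the normalizer and pre-tokenizer above (assumption):
    # ['UTT_BOUNDARY', 'h', 'e', 'l', 'l', 'o', 'w', 'o', 'r', 'l', 'd']
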
tokenizer_config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "UNK",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "PAD",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "UTT_BOUNDARY",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "UTT_BOUNDARY",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "UTT_BOUNDARY",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "PAD",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "UNK"
+ }
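
Note: tokenizer_config.json maps the special tokens onto the GPT2Tokenizer interface, with UTT_BOUNDARY serving as both BOS and EOS. A minimal loading sketch (assumption: the uploaded files sit in the current directory, or substitute the Hub repo id once published):

    # Sketch: load the tokenizer via transformers and inspect the special tokens.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(".")
    print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.pad_token, tokenizer.unk_token)
    # UTT_BOUNDARY UTT_BOUNDARY PAD UNK
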
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03fa734da6cb25f5559da3afe4f24c337b478930763d251de0bc666d081e17fb
+ size 5368
vocab.json ADDED
@@ -0,0 +1 @@
+ {"UNK":0,"PAD":1,"W":2,"UTT_BOUNDARY":3,"y":4,"e":5,"a":6,"h":7,".":8,"c":9,"o":10,"m":11,"p":12,"u":13,"n":14,"d":15,"'":16,"s":17,"t":18,"i":19,"g":20,"l":21,"k":22,"x":23,",":24,"r":25,"w":26,"v":27,"f":28,"b":29,"j":30,"?":31,"-":32,"q":33,";":34,"2":35,"‘":36,"’":37,"!":38,"/":39,"1":40,":":41,"z":42,"3":43,"6":44,"9":45,"&":46,"4":47,"5":48,"0":49,"=":50,"8":51,"7":52,"£":53,"(":54,")":55,"—":56,"*":57,"]":58,"[":59,"\"":60,"_":61,"%":62,"“":63,"”":64,"+":65,"$":66,"^":67,"#":68,"æ":69,"ʌ":70,"ɩ":71,"ə":72,"↫":73,"|":74,"°":75,"ø":76,"~":77,"⁄":78,"`":79,"�":80,"′":81,"@":82,"}":83,"{":84,"―":85,"–":86,"·":87,"♪":88,"¡":89,"÷":90,"\\":91,"¶":92,"ð":93,"¿":94,"­":95,"♫":96,"​":97,"œ":98,"ł":99,"¦":100,"×":101,"™":102,"ß":103,"ˈ":104,"ı":105,"đ":106,"−":107,"ː":108,"•":109,"⟨":110,"⟩":111,"ŋ":112,"ʼ":113,"\t":114}