Kushagra07 committed
Commit 249278e
1 Parent(s): ae64e70

Upload folder using huggingface_hub

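The commit message above matches the default used by `huggingface_hub`'s folder upload. A minimal sketch of the kind of call that produces such a commit, assuming a hypothetical repo id and local folder name (neither is recorded on this page):

```python
# Hedged sketch: the sort of call that yields an "Upload folder using huggingface_hub" commit.
# repo_id and folder_path are placeholders, not taken from this commit.
from huggingface_hub import HfApi

api = HfApi()  # authenticates via `huggingface-cli login` or the HF_TOKEN env var
api.upload_folder(
    repo_id="Kushagra07/autotrain-swinv2-tiny-patch4-window8-256",
    folder_path="autotrain-swinv2-tiny-patch4-window8-256",
    repo_type="model",
)
```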
README.md ADDED
@@ -0,0 +1,42 @@
1
+
2
+ ---
3
+ tags:
4
+ - autotrain
5
+ - image-classification
6
+ widget:
7
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
8
+ example_title: Tiger
9
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
10
+ example_title: Teapot
11
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
12
+ example_title: Palace
13
+ datasets:
14
+ - autotrain-swinv2-tiny-patch4-window8-256/autotrain-data
15
+ ---
16
+
17
+ # Model Trained Using AutoTrain
18
+
19
+ - Problem type: Image Classification
20
+
21
+ ## Validation Metrics
22
+ loss: 0.2802155911922455
23
+
24
+ f1_macro: 0.7552413664079601
25
+
26
+ f1_micro: 0.9061113007852509
27
+
28
+ f1_weighted: 0.9035670983782715
29
+
30
+ precision_macro: 0.8498221458985794
31
+
32
+ precision_micro: 0.9061113007852509
33
+
34
+ precision_weighted: 0.9095288940815534
35
+
36
+ recall_macro: 0.7266443770545475
37
+
38
+ recall_micro: 0.9061113007852509
39
+
40
+ recall_weighted: 0.9061113007852509
41
+
42
+ accuracy: 0.9061113007852509
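The README above documents a 17-class image-classification model (document-type classification, per the label map in the config below). A minimal inference sketch with the `transformers` pipeline; the Hub repo id is assumed from the username and AutoTrain `project_name`, and the image path is a placeholder:

```python
# Minimal sketch: classify a document image with the model described in the README.
# The model id below is an assumption (username + AutoTrain project name).
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="Kushagra07/autotrain-swinv2-tiny-patch4-window8-256",
)

# Returns the top labels with scores, e.g. [{"label": "PanCard", "score": 0.97}, ...]
predictions = classifier("sample_document.jpg")
print(predictions)
```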
checkpoint-4386/config.json ADDED
@@ -0,0 +1,98 @@
1
+ {
2
+ "_name_or_path": "microsoft/swinv2-tiny-patch4-window8-256",
3
+ "_num_labels": 17,
4
+ "architectures": [
5
+ "Swinv2ForImageClassification"
6
+ ],
7
+ "attention_probs_dropout_prob": 0.0,
8
+ "depths": [
9
+ 2,
10
+ 2,
11
+ 6,
12
+ 2
13
+ ],
14
+ "drop_path_rate": 0.1,
15
+ "embed_dim": 96,
16
+ "encoder_stride": 32,
17
+ "hidden_act": "gelu",
18
+ "hidden_dropout_prob": 0.0,
19
+ "hidden_size": 768,
20
+ "id2label": {
21
+ "0": "AadhaarBack",
22
+ "1": "AadhaarBackMasked",
23
+ "2": "AadhaarFront",
24
+ "3": "AadhaarFrontMasked",
25
+ "4": "AadhaarRegular",
26
+ "5": "AadhaarRegularMasked",
27
+ "6": "AadhaarSelfGenerated",
28
+ "7": "DrivingLicenseNew",
29
+ "8": "DrivingLicenseOld",
30
+ "9": "MultipleOVDs",
31
+ "10": "PanCard",
32
+ "11": "PassportFirst",
33
+ "12": "PassportLast",
34
+ "13": "PassportRegular",
35
+ "14": "VoterCardBack",
36
+ "15": "VoterCardFront",
37
+ "16": "VoterCardRegular"
38
+ },
39
+ "image_size": 256,
40
+ "initializer_range": 0.02,
41
+ "label2id": {
42
+ "AadhaarBack": 0,
43
+ "AadhaarBackMasked": 1,
44
+ "AadhaarFront": 2,
45
+ "AadhaarFrontMasked": 3,
46
+ "AadhaarRegular": 4,
47
+ "AadhaarRegularMasked": 5,
48
+ "AadhaarSelfGenerated": 6,
49
+ "DrivingLicenseNew": 7,
50
+ "DrivingLicenseOld": 8,
51
+ "MultipleOVDs": 9,
52
+ "PanCard": 10,
53
+ "PassportFirst": 11,
54
+ "PassportLast": 12,
55
+ "PassportRegular": 13,
56
+ "VoterCardBack": 14,
57
+ "VoterCardFront": 15,
58
+ "VoterCardRegular": 16
59
+ },
60
+ "layer_norm_eps": 1e-05,
61
+ "mlp_ratio": 4.0,
62
+ "model_type": "swinv2",
63
+ "num_channels": 3,
64
+ "num_heads": [
65
+ 3,
66
+ 6,
67
+ 12,
68
+ 24
69
+ ],
70
+ "num_layers": 4,
71
+ "out_features": [
72
+ "stage4"
73
+ ],
74
+ "out_indices": [
75
+ 4
76
+ ],
77
+ "patch_size": 4,
78
+ "path_norm": true,
79
+ "pretrained_window_sizes": [
80
+ 0,
81
+ 0,
82
+ 0,
83
+ 0
84
+ ],
85
+ "problem_type": "single_label_classification",
86
+ "qkv_bias": true,
87
+ "stage_names": [
88
+ "stem",
89
+ "stage1",
90
+ "stage2",
91
+ "stage3",
92
+ "stage4"
93
+ ],
94
+ "torch_dtype": "float32",
95
+ "transformers_version": "4.40.1",
96
+ "use_absolute_embeddings": false,
97
+ "window_size": 8
98
+ }
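The config above carries the full 17-way `id2label`/`label2id` mapping for Indian identity-document types, so predictions can also be decoded manually from logits. A hedged sketch, reusing the assumed repo id from the note above (a local directory holding config.json, model.safetensors and preprocessor_config.json would work the same way):

```python
# Sketch: manual forward pass + id2label lookup, mirroring the mapping in the config above.
# The repo id is an assumption; the image path is a placeholder.
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

model_id = "Kushagra07/autotrain-swinv2-tiny-patch4-window8-256"
processor = AutoImageProcessor.from_pretrained(model_id)
model = AutoModelForImageClassification.from_pretrained(model_id)

image = Image.open("sample_document.jpg").convert("RGB")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits          # shape: (1, 17)

predicted_id = logits.argmax(dim=-1).item()
print(model.config.id2label[predicted_id])   # e.g. "AadhaarFront"
```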
checkpoint-4386/model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:27b710e007c37457688de5a0f4b11dee19fa7b8c2f1a4371d8ddca2ddc5484a1
3
+ size 110396292
checkpoint-4386/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9fbdd3f8c790293e61f9790184660e095cc372e01075874bf1dd0ee20fc73609
3
+ size 220933562
checkpoint-4386/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:15e41c5c98f9bf77d037ceeb70a991753e70502f1c81723240cbd3212cea7379
3
+ size 14244
checkpoint-4386/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1a9520946c87e17c4825a16ec41ef338fab797f8f1213c506e0a88b430b9bba4
3
+ size 1064
checkpoint-4386/trainer_state.json ADDED
@@ -0,0 +1,1300 @@
1
+ {
2
+ "best_metric": 0.2802155911922455,
3
+ "best_model_checkpoint": "autotrain-swinv2-tiny-patch4-window8-256/checkpoint-4386",
4
+ "epoch": 3.0,
5
+ "eval_steps": 500,
6
+ "global_step": 4386,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.01709986320109439,
13
+ "grad_norm": 16.21043586730957,
14
+ "learning_rate": 7.86593707250342e-07,
15
+ "loss": 2.7812,
16
+ "step": 25
17
+ },
18
+ {
19
+ "epoch": 0.03419972640218878,
20
+ "grad_norm": 21.944915771484375,
21
+ "learning_rate": 1.6415868673050617e-06,
22
+ "loss": 2.6916,
23
+ "step": 50
24
+ },
25
+ {
26
+ "epoch": 0.05129958960328317,
27
+ "grad_norm": 15.82861328125,
28
+ "learning_rate": 2.496580027359781e-06,
29
+ "loss": 2.5256,
30
+ "step": 75
31
+ },
32
+ {
33
+ "epoch": 0.06839945280437756,
34
+ "grad_norm": 28.86319351196289,
35
+ "learning_rate": 3.3515731874145007e-06,
36
+ "loss": 2.2838,
37
+ "step": 100
38
+ },
39
+ {
40
+ "epoch": 0.08549931600547196,
41
+ "grad_norm": 23.919939041137695,
42
+ "learning_rate": 4.172366621067032e-06,
43
+ "loss": 1.993,
44
+ "step": 125
45
+ },
46
+ {
47
+ "epoch": 0.10259917920656635,
48
+ "grad_norm": 22.423189163208008,
49
+ "learning_rate": 5.027359781121752e-06,
50
+ "loss": 1.9762,
51
+ "step": 150
52
+ },
53
+ {
54
+ "epoch": 0.11969904240766074,
55
+ "grad_norm": 18.18659210205078,
56
+ "learning_rate": 5.882352941176471e-06,
57
+ "loss": 1.6428,
58
+ "step": 175
59
+ },
60
+ {
61
+ "epoch": 0.13679890560875513,
62
+ "grad_norm": 25.3078670501709,
63
+ "learning_rate": 6.73734610123119e-06,
64
+ "loss": 1.6677,
65
+ "step": 200
66
+ },
67
+ {
68
+ "epoch": 0.1538987688098495,
69
+ "grad_norm": 33.7442626953125,
70
+ "learning_rate": 7.592339261285911e-06,
71
+ "loss": 1.5048,
72
+ "step": 225
73
+ },
74
+ {
75
+ "epoch": 0.17099863201094392,
76
+ "grad_norm": 44.62141799926758,
77
+ "learning_rate": 8.44733242134063e-06,
78
+ "loss": 1.3579,
79
+ "step": 250
80
+ },
81
+ {
82
+ "epoch": 0.1880984952120383,
83
+ "grad_norm": 47.94766616821289,
84
+ "learning_rate": 9.26812585499316e-06,
85
+ "loss": 1.3141,
86
+ "step": 275
87
+ },
88
+ {
89
+ "epoch": 0.2051983584131327,
90
+ "grad_norm": 50.67353820800781,
91
+ "learning_rate": 1.0123119015047879e-05,
92
+ "loss": 1.5654,
93
+ "step": 300
94
+ },
95
+ {
96
+ "epoch": 0.22229822161422708,
97
+ "grad_norm": 34.55750274658203,
98
+ "learning_rate": 1.09781121751026e-05,
99
+ "loss": 1.3736,
100
+ "step": 325
101
+ },
102
+ {
103
+ "epoch": 0.2393980848153215,
104
+ "grad_norm": 29.00438117980957,
105
+ "learning_rate": 1.183310533515732e-05,
106
+ "loss": 1.274,
107
+ "step": 350
108
+ },
109
+ {
110
+ "epoch": 0.25649794801641584,
111
+ "grad_norm": 24.081707000732422,
112
+ "learning_rate": 1.2688098495212038e-05,
113
+ "loss": 1.2503,
114
+ "step": 375
115
+ },
116
+ {
117
+ "epoch": 0.27359781121751026,
118
+ "grad_norm": 23.609891891479492,
119
+ "learning_rate": 1.354309165526676e-05,
120
+ "loss": 1.0883,
121
+ "step": 400
122
+ },
123
+ {
124
+ "epoch": 0.29069767441860467,
125
+ "grad_norm": 18.259571075439453,
126
+ "learning_rate": 1.4398084815321477e-05,
127
+ "loss": 1.1379,
128
+ "step": 425
129
+ },
130
+ {
131
+ "epoch": 0.307797537619699,
132
+ "grad_norm": 42.96183395385742,
133
+ "learning_rate": 1.5253077975376198e-05,
134
+ "loss": 1.2596,
135
+ "step": 450
136
+ },
137
+ {
138
+ "epoch": 0.32489740082079344,
139
+ "grad_norm": 37.27230453491211,
140
+ "learning_rate": 1.6108071135430915e-05,
141
+ "loss": 1.0158,
142
+ "step": 475
143
+ },
144
+ {
145
+ "epoch": 0.34199726402188785,
146
+ "grad_norm": 113.23546600341797,
147
+ "learning_rate": 1.6963064295485636e-05,
148
+ "loss": 1.0687,
149
+ "step": 500
150
+ },
151
+ {
152
+ "epoch": 0.3590971272229822,
153
+ "grad_norm": 19.023685455322266,
154
+ "learning_rate": 1.7818057455540357e-05,
155
+ "loss": 0.9566,
156
+ "step": 525
157
+ },
158
+ {
159
+ "epoch": 0.3761969904240766,
160
+ "grad_norm": 29.44492530822754,
161
+ "learning_rate": 1.8673050615595075e-05,
162
+ "loss": 1.128,
163
+ "step": 550
164
+ },
165
+ {
166
+ "epoch": 0.393296853625171,
167
+ "grad_norm": 42.041595458984375,
168
+ "learning_rate": 1.9528043775649796e-05,
169
+ "loss": 1.0133,
170
+ "step": 575
171
+ },
172
+ {
173
+ "epoch": 0.4103967168262654,
174
+ "grad_norm": 47.55967712402344,
175
+ "learning_rate": 2.0383036935704516e-05,
176
+ "loss": 1.0888,
177
+ "step": 600
178
+ },
179
+ {
180
+ "epoch": 0.4274965800273598,
181
+ "grad_norm": 12.591029167175293,
182
+ "learning_rate": 2.1238030095759234e-05,
183
+ "loss": 0.9936,
184
+ "step": 625
185
+ },
186
+ {
187
+ "epoch": 0.44459644322845415,
188
+ "grad_norm": 31.012723922729492,
189
+ "learning_rate": 2.2093023255813955e-05,
190
+ "loss": 0.9765,
191
+ "step": 650
192
+ },
193
+ {
194
+ "epoch": 0.46169630642954856,
195
+ "grad_norm": 39.08427047729492,
196
+ "learning_rate": 2.2948016415868672e-05,
197
+ "loss": 0.9398,
198
+ "step": 675
199
+ },
200
+ {
201
+ "epoch": 0.478796169630643,
202
+ "grad_norm": 34.639007568359375,
203
+ "learning_rate": 2.3803009575923393e-05,
204
+ "loss": 0.7951,
205
+ "step": 700
206
+ },
207
+ {
208
+ "epoch": 0.49589603283173733,
209
+ "grad_norm": 40.917171478271484,
210
+ "learning_rate": 2.4658002735978114e-05,
211
+ "loss": 0.7592,
212
+ "step": 725
213
+ },
214
+ {
215
+ "epoch": 0.5129958960328317,
216
+ "grad_norm": 27.42568016052246,
217
+ "learning_rate": 2.5512995896032832e-05,
218
+ "loss": 0.8023,
219
+ "step": 750
220
+ },
221
+ {
222
+ "epoch": 0.5300957592339262,
223
+ "grad_norm": 10.912271499633789,
224
+ "learning_rate": 2.6367989056087556e-05,
225
+ "loss": 0.832,
226
+ "step": 775
227
+ },
228
+ {
229
+ "epoch": 0.5471956224350205,
230
+ "grad_norm": 35.33407211303711,
231
+ "learning_rate": 2.7222982216142274e-05,
232
+ "loss": 0.8747,
233
+ "step": 800
234
+ },
235
+ {
236
+ "epoch": 0.5642954856361149,
237
+ "grad_norm": 23.503469467163086,
238
+ "learning_rate": 2.807797537619699e-05,
239
+ "loss": 0.9251,
240
+ "step": 825
241
+ },
242
+ {
243
+ "epoch": 0.5813953488372093,
244
+ "grad_norm": 28.509077072143555,
245
+ "learning_rate": 2.893296853625171e-05,
246
+ "loss": 0.9039,
247
+ "step": 850
248
+ },
249
+ {
250
+ "epoch": 0.5984952120383037,
251
+ "grad_norm": 20.486900329589844,
252
+ "learning_rate": 2.9787961696306433e-05,
253
+ "loss": 0.9576,
254
+ "step": 875
255
+ },
256
+ {
257
+ "epoch": 0.615595075239398,
258
+ "grad_norm": 26.435588836669922,
259
+ "learning_rate": 3.064295485636115e-05,
260
+ "loss": 0.8952,
261
+ "step": 900
262
+ },
263
+ {
264
+ "epoch": 0.6326949384404925,
265
+ "grad_norm": 25.732032775878906,
266
+ "learning_rate": 3.149794801641587e-05,
267
+ "loss": 0.8222,
268
+ "step": 925
269
+ },
270
+ {
271
+ "epoch": 0.6497948016415869,
272
+ "grad_norm": 33.15768051147461,
273
+ "learning_rate": 3.235294117647059e-05,
274
+ "loss": 0.9005,
275
+ "step": 950
276
+ },
277
+ {
278
+ "epoch": 0.6668946648426812,
279
+ "grad_norm": 35.35673141479492,
280
+ "learning_rate": 3.3207934336525306e-05,
281
+ "loss": 1.081,
282
+ "step": 975
283
+ },
284
+ {
285
+ "epoch": 0.6839945280437757,
286
+ "grad_norm": 46.90694046020508,
287
+ "learning_rate": 3.406292749658003e-05,
288
+ "loss": 0.7733,
289
+ "step": 1000
290
+ },
291
+ {
292
+ "epoch": 0.70109439124487,
293
+ "grad_norm": 26.313335418701172,
294
+ "learning_rate": 3.491792065663475e-05,
295
+ "loss": 0.919,
296
+ "step": 1025
297
+ },
298
+ {
299
+ "epoch": 0.7181942544459644,
300
+ "grad_norm": 44.662662506103516,
301
+ "learning_rate": 3.577291381668947e-05,
302
+ "loss": 0.6723,
303
+ "step": 1050
304
+ },
305
+ {
306
+ "epoch": 0.7352941176470589,
307
+ "grad_norm": 6.602749347686768,
308
+ "learning_rate": 3.662790697674418e-05,
309
+ "loss": 0.7601,
310
+ "step": 1075
311
+ },
312
+ {
313
+ "epoch": 0.7523939808481532,
314
+ "grad_norm": 38.393550872802734,
315
+ "learning_rate": 3.748290013679891e-05,
316
+ "loss": 0.748,
317
+ "step": 1100
318
+ },
319
+ {
320
+ "epoch": 0.7694938440492476,
321
+ "grad_norm": 11.72321891784668,
322
+ "learning_rate": 3.8337893296853625e-05,
323
+ "loss": 0.7516,
324
+ "step": 1125
325
+ },
326
+ {
327
+ "epoch": 0.786593707250342,
328
+ "grad_norm": 11.254487991333008,
329
+ "learning_rate": 3.9192886456908346e-05,
330
+ "loss": 0.7186,
331
+ "step": 1150
332
+ },
333
+ {
334
+ "epoch": 0.8036935704514364,
335
+ "grad_norm": 44.11043930053711,
336
+ "learning_rate": 4.004787961696307e-05,
337
+ "loss": 0.7565,
338
+ "step": 1175
339
+ },
340
+ {
341
+ "epoch": 0.8207934336525308,
342
+ "grad_norm": 11.987720489501953,
343
+ "learning_rate": 4.090287277701779e-05,
344
+ "loss": 0.7304,
345
+ "step": 1200
346
+ },
347
+ {
348
+ "epoch": 0.8378932968536251,
349
+ "grad_norm": 17.10840606689453,
350
+ "learning_rate": 4.17578659370725e-05,
351
+ "loss": 0.8912,
352
+ "step": 1225
353
+ },
354
+ {
355
+ "epoch": 0.8549931600547196,
356
+ "grad_norm": 19.49997901916504,
357
+ "learning_rate": 4.261285909712722e-05,
358
+ "loss": 0.9355,
359
+ "step": 1250
360
+ },
361
+ {
362
+ "epoch": 0.872093023255814,
363
+ "grad_norm": 12.431605339050293,
364
+ "learning_rate": 4.3467852257181944e-05,
365
+ "loss": 0.7146,
366
+ "step": 1275
367
+ },
368
+ {
369
+ "epoch": 0.8891928864569083,
370
+ "grad_norm": 20.540786743164062,
371
+ "learning_rate": 4.4322845417236665e-05,
372
+ "loss": 0.8042,
373
+ "step": 1300
374
+ },
375
+ {
376
+ "epoch": 0.9062927496580028,
377
+ "grad_norm": 28.939634323120117,
378
+ "learning_rate": 4.517783857729138e-05,
379
+ "loss": 0.7885,
380
+ "step": 1325
381
+ },
382
+ {
383
+ "epoch": 0.9233926128590971,
384
+ "grad_norm": 27.024660110473633,
385
+ "learning_rate": 4.6032831737346106e-05,
386
+ "loss": 0.7953,
387
+ "step": 1350
388
+ },
389
+ {
390
+ "epoch": 0.9404924760601915,
391
+ "grad_norm": 47.79359436035156,
392
+ "learning_rate": 4.688782489740082e-05,
393
+ "loss": 0.759,
394
+ "step": 1375
395
+ },
396
+ {
397
+ "epoch": 0.957592339261286,
398
+ "grad_norm": 18.608360290527344,
399
+ "learning_rate": 4.774281805745554e-05,
400
+ "loss": 0.7392,
401
+ "step": 1400
402
+ },
403
+ {
404
+ "epoch": 0.9746922024623803,
405
+ "grad_norm": 16.670150756835938,
406
+ "learning_rate": 4.859781121751026e-05,
407
+ "loss": 0.664,
408
+ "step": 1425
409
+ },
410
+ {
411
+ "epoch": 0.9917920656634747,
412
+ "grad_norm": 21.591880798339844,
413
+ "learning_rate": 4.945280437756498e-05,
414
+ "loss": 0.7628,
415
+ "step": 1450
416
+ },
417
+ {
418
+ "epoch": 1.0,
419
+ "eval_accuracy": 0.7722772277227723,
420
+ "eval_f1_macro": 0.3963476960209859,
421
+ "eval_f1_micro": 0.7722772277227723,
422
+ "eval_f1_weighted": 0.7524459692668548,
423
+ "eval_loss": 0.6640351414680481,
424
+ "eval_precision_macro": 0.5662665685743159,
425
+ "eval_precision_micro": 0.7722772277227723,
426
+ "eval_precision_weighted": 0.8150598854310834,
427
+ "eval_recall_macro": 0.4019843036358822,
428
+ "eval_recall_micro": 0.7722772277227723,
429
+ "eval_recall_weighted": 0.7722772277227723,
430
+ "eval_runtime": 19.424,
431
+ "eval_samples_per_second": 150.793,
432
+ "eval_steps_per_second": 9.473,
433
+ "step": 1462
434
+ },
435
+ {
436
+ "epoch": 1.008891928864569,
437
+ "grad_norm": 18.68697738647461,
438
+ "learning_rate": 4.996580027359781e-05,
439
+ "loss": 0.6562,
440
+ "step": 1475
441
+ },
442
+ {
443
+ "epoch": 1.0259917920656634,
444
+ "grad_norm": 26.281583786010742,
445
+ "learning_rate": 4.9870801033591734e-05,
446
+ "loss": 0.7318,
447
+ "step": 1500
448
+ },
449
+ {
450
+ "epoch": 1.043091655266758,
451
+ "grad_norm": 26.66839599609375,
452
+ "learning_rate": 4.977580179358565e-05,
453
+ "loss": 0.803,
454
+ "step": 1525
455
+ },
456
+ {
457
+ "epoch": 1.0601915184678523,
458
+ "grad_norm": 10.613127708435059,
459
+ "learning_rate": 4.9680802553579575e-05,
460
+ "loss": 0.536,
461
+ "step": 1550
462
+ },
463
+ {
464
+ "epoch": 1.0772913816689467,
465
+ "grad_norm": 13.497079849243164,
466
+ "learning_rate": 4.958580331357349e-05,
467
+ "loss": 0.6842,
468
+ "step": 1575
469
+ },
470
+ {
471
+ "epoch": 1.094391244870041,
472
+ "grad_norm": 9.89592170715332,
473
+ "learning_rate": 4.9490804073567415e-05,
474
+ "loss": 0.6305,
475
+ "step": 1600
476
+ },
477
+ {
478
+ "epoch": 1.1114911080711354,
479
+ "grad_norm": 16.67163848876953,
480
+ "learning_rate": 4.939580483356133e-05,
481
+ "loss": 0.7628,
482
+ "step": 1625
483
+ },
484
+ {
485
+ "epoch": 1.1285909712722297,
486
+ "grad_norm": 42.5455207824707,
487
+ "learning_rate": 4.9300805593555256e-05,
488
+ "loss": 0.6883,
489
+ "step": 1650
490
+ },
491
+ {
492
+ "epoch": 1.1456908344733243,
493
+ "grad_norm": 10.086162567138672,
494
+ "learning_rate": 4.920580635354917e-05,
495
+ "loss": 0.6851,
496
+ "step": 1675
497
+ },
498
+ {
499
+ "epoch": 1.1627906976744187,
500
+ "grad_norm": 15.008639335632324,
501
+ "learning_rate": 4.9110807113543096e-05,
502
+ "loss": 0.7015,
503
+ "step": 1700
504
+ },
505
+ {
506
+ "epoch": 1.179890560875513,
507
+ "grad_norm": 36.36772155761719,
508
+ "learning_rate": 4.901580787353701e-05,
509
+ "loss": 0.7014,
510
+ "step": 1725
511
+ },
512
+ {
513
+ "epoch": 1.1969904240766074,
514
+ "grad_norm": 24.153322219848633,
515
+ "learning_rate": 4.892080863353094e-05,
516
+ "loss": 0.6344,
517
+ "step": 1750
518
+ },
519
+ {
520
+ "epoch": 1.2140902872777017,
521
+ "grad_norm": 14.07002067565918,
522
+ "learning_rate": 4.8825809393524854e-05,
523
+ "loss": 0.7835,
524
+ "step": 1775
525
+ },
526
+ {
527
+ "epoch": 1.231190150478796,
528
+ "grad_norm": 7.812533378601074,
529
+ "learning_rate": 4.873081015351878e-05,
530
+ "loss": 0.5902,
531
+ "step": 1800
532
+ },
533
+ {
534
+ "epoch": 1.2482900136798905,
535
+ "grad_norm": 16.708251953125,
536
+ "learning_rate": 4.8635810913512694e-05,
537
+ "loss": 0.6682,
538
+ "step": 1825
539
+ },
540
+ {
541
+ "epoch": 1.265389876880985,
542
+ "grad_norm": 62.408294677734375,
543
+ "learning_rate": 4.854081167350661e-05,
544
+ "loss": 0.7275,
545
+ "step": 1850
546
+ },
547
+ {
548
+ "epoch": 1.2824897400820794,
549
+ "grad_norm": 11.449152946472168,
550
+ "learning_rate": 4.8445812433500535e-05,
551
+ "loss": 0.7308,
552
+ "step": 1875
553
+ },
554
+ {
555
+ "epoch": 1.2995896032831737,
556
+ "grad_norm": 6.670589447021484,
557
+ "learning_rate": 4.835081319349445e-05,
558
+ "loss": 0.6819,
559
+ "step": 1900
560
+ },
561
+ {
562
+ "epoch": 1.316689466484268,
563
+ "grad_norm": 14.783951759338379,
564
+ "learning_rate": 4.8255813953488375e-05,
565
+ "loss": 0.7014,
566
+ "step": 1925
567
+ },
568
+ {
569
+ "epoch": 1.3337893296853625,
570
+ "grad_norm": 11.176630973815918,
571
+ "learning_rate": 4.816081471348229e-05,
572
+ "loss": 0.7157,
573
+ "step": 1950
574
+ },
575
+ {
576
+ "epoch": 1.350889192886457,
577
+ "grad_norm": 14.224772453308105,
578
+ "learning_rate": 4.8065815473476216e-05,
579
+ "loss": 0.58,
580
+ "step": 1975
581
+ },
582
+ {
583
+ "epoch": 1.3679890560875512,
584
+ "grad_norm": 35.03193283081055,
585
+ "learning_rate": 4.797081623347013e-05,
586
+ "loss": 0.7904,
587
+ "step": 2000
588
+ },
589
+ {
590
+ "epoch": 1.3850889192886457,
591
+ "grad_norm": 47.37995147705078,
592
+ "learning_rate": 4.7875816993464056e-05,
593
+ "loss": 0.7543,
594
+ "step": 2025
595
+ },
596
+ {
597
+ "epoch": 1.40218878248974,
598
+ "grad_norm": 13.47080135345459,
599
+ "learning_rate": 4.778081775345797e-05,
600
+ "loss": 0.8038,
601
+ "step": 2050
602
+ },
603
+ {
604
+ "epoch": 1.4192886456908345,
605
+ "grad_norm": 5.6258158683776855,
606
+ "learning_rate": 4.76858185134519e-05,
607
+ "loss": 0.7249,
608
+ "step": 2075
609
+ },
610
+ {
611
+ "epoch": 1.4363885088919288,
612
+ "grad_norm": 8.781723976135254,
613
+ "learning_rate": 4.7590819273445814e-05,
614
+ "loss": 0.5521,
615
+ "step": 2100
616
+ },
617
+ {
618
+ "epoch": 1.4534883720930232,
619
+ "grad_norm": 7.260859966278076,
620
+ "learning_rate": 4.749582003343974e-05,
621
+ "loss": 0.5422,
622
+ "step": 2125
623
+ },
624
+ {
625
+ "epoch": 1.4705882352941178,
626
+ "grad_norm": 9.566021919250488,
627
+ "learning_rate": 4.7400820793433654e-05,
628
+ "loss": 0.7651,
629
+ "step": 2150
630
+ },
631
+ {
632
+ "epoch": 1.487688098495212,
633
+ "grad_norm": 26.22560691833496,
634
+ "learning_rate": 4.730582155342758e-05,
635
+ "loss": 0.6579,
636
+ "step": 2175
637
+ },
638
+ {
639
+ "epoch": 1.5047879616963065,
640
+ "grad_norm": 7.469398498535156,
641
+ "learning_rate": 4.7210822313421495e-05,
642
+ "loss": 0.7464,
643
+ "step": 2200
644
+ },
645
+ {
646
+ "epoch": 1.5218878248974008,
647
+ "grad_norm": 12.48919677734375,
648
+ "learning_rate": 4.711582307341542e-05,
649
+ "loss": 0.6315,
650
+ "step": 2225
651
+ },
652
+ {
653
+ "epoch": 1.5389876880984952,
654
+ "grad_norm": 7.278232097625732,
655
+ "learning_rate": 4.7020823833409335e-05,
656
+ "loss": 0.5035,
657
+ "step": 2250
658
+ },
659
+ {
660
+ "epoch": 1.5560875512995898,
661
+ "grad_norm": 20.841951370239258,
662
+ "learning_rate": 4.692582459340326e-05,
663
+ "loss": 0.7368,
664
+ "step": 2275
665
+ },
666
+ {
667
+ "epoch": 1.573187414500684,
668
+ "grad_norm": 19.840713500976562,
669
+ "learning_rate": 4.6830825353397176e-05,
670
+ "loss": 0.7734,
671
+ "step": 2300
672
+ },
673
+ {
674
+ "epoch": 1.5902872777017785,
675
+ "grad_norm": 20.479629516601562,
676
+ "learning_rate": 4.673582611339109e-05,
677
+ "loss": 0.6132,
678
+ "step": 2325
679
+ },
680
+ {
681
+ "epoch": 1.6073871409028728,
682
+ "grad_norm": 18.495811462402344,
683
+ "learning_rate": 4.664082687338501e-05,
684
+ "loss": 0.5594,
685
+ "step": 2350
686
+ },
687
+ {
688
+ "epoch": 1.6244870041039672,
689
+ "grad_norm": 8.165420532226562,
690
+ "learning_rate": 4.6545827633378933e-05,
691
+ "loss": 0.6215,
692
+ "step": 2375
693
+ },
694
+ {
695
+ "epoch": 1.6415868673050615,
696
+ "grad_norm": 9.201417922973633,
697
+ "learning_rate": 4.645082839337285e-05,
698
+ "loss": 0.5739,
699
+ "step": 2400
700
+ },
701
+ {
702
+ "epoch": 1.658686730506156,
703
+ "grad_norm": 27.07282829284668,
704
+ "learning_rate": 4.6355829153366774e-05,
705
+ "loss": 0.6288,
706
+ "step": 2425
707
+ },
708
+ {
709
+ "epoch": 1.6757865937072505,
710
+ "grad_norm": 7.3830976486206055,
711
+ "learning_rate": 4.626082991336069e-05,
712
+ "loss": 0.5158,
713
+ "step": 2450
714
+ },
715
+ {
716
+ "epoch": 1.6928864569083446,
717
+ "grad_norm": 14.039732933044434,
718
+ "learning_rate": 4.6165830673354615e-05,
719
+ "loss": 0.6536,
720
+ "step": 2475
721
+ },
722
+ {
723
+ "epoch": 1.7099863201094392,
724
+ "grad_norm": 31.472610473632812,
725
+ "learning_rate": 4.607083143334854e-05,
726
+ "loss": 0.6594,
727
+ "step": 2500
728
+ },
729
+ {
730
+ "epoch": 1.7270861833105335,
731
+ "grad_norm": 13.184996604919434,
732
+ "learning_rate": 4.5975832193342455e-05,
733
+ "loss": 0.6299,
734
+ "step": 2525
735
+ },
736
+ {
737
+ "epoch": 1.744186046511628,
738
+ "grad_norm": 5.3301286697387695,
739
+ "learning_rate": 4.588083295333638e-05,
740
+ "loss": 0.5745,
741
+ "step": 2550
742
+ },
743
+ {
744
+ "epoch": 1.7612859097127223,
745
+ "grad_norm": 5.333646774291992,
746
+ "learning_rate": 4.5785833713330296e-05,
747
+ "loss": 0.5694,
748
+ "step": 2575
749
+ },
750
+ {
751
+ "epoch": 1.7783857729138166,
752
+ "grad_norm": 22.24896240234375,
753
+ "learning_rate": 4.569083447332422e-05,
754
+ "loss": 0.5209,
755
+ "step": 2600
756
+ },
757
+ {
758
+ "epoch": 1.7954856361149112,
759
+ "grad_norm": 10.656512260437012,
760
+ "learning_rate": 4.5595835233318136e-05,
761
+ "loss": 0.5069,
762
+ "step": 2625
763
+ },
764
+ {
765
+ "epoch": 1.8125854993160053,
766
+ "grad_norm": 14.687119483947754,
767
+ "learning_rate": 4.550083599331206e-05,
768
+ "loss": 0.5486,
769
+ "step": 2650
770
+ },
771
+ {
772
+ "epoch": 1.8296853625171,
773
+ "grad_norm": 22.30952262878418,
774
+ "learning_rate": 4.540583675330598e-05,
775
+ "loss": 0.6733,
776
+ "step": 2675
777
+ },
778
+ {
779
+ "epoch": 1.8467852257181943,
780
+ "grad_norm": 9.407474517822266,
781
+ "learning_rate": 4.53108375132999e-05,
782
+ "loss": 0.4365,
783
+ "step": 2700
784
+ },
785
+ {
786
+ "epoch": 1.8638850889192886,
787
+ "grad_norm": 9.207115173339844,
788
+ "learning_rate": 4.521583827329382e-05,
789
+ "loss": 0.6675,
790
+ "step": 2725
791
+ },
792
+ {
793
+ "epoch": 1.8809849521203832,
794
+ "grad_norm": 22.828750610351562,
795
+ "learning_rate": 4.5120839033287734e-05,
796
+ "loss": 0.5888,
797
+ "step": 2750
798
+ },
799
+ {
800
+ "epoch": 1.8980848153214773,
801
+ "grad_norm": 6.441003322601318,
802
+ "learning_rate": 4.502583979328165e-05,
803
+ "loss": 0.4317,
804
+ "step": 2775
805
+ },
806
+ {
807
+ "epoch": 1.915184678522572,
808
+ "grad_norm": 7.692673683166504,
809
+ "learning_rate": 4.4930840553275575e-05,
810
+ "loss": 0.622,
811
+ "step": 2800
812
+ },
813
+ {
814
+ "epoch": 1.9322845417236663,
815
+ "grad_norm": 11.904592514038086,
816
+ "learning_rate": 4.483584131326949e-05,
817
+ "loss": 0.5188,
818
+ "step": 2825
819
+ },
820
+ {
821
+ "epoch": 1.9493844049247606,
822
+ "grad_norm": 7.662288188934326,
823
+ "learning_rate": 4.4740842073263415e-05,
824
+ "loss": 0.7301,
825
+ "step": 2850
826
+ },
827
+ {
828
+ "epoch": 1.966484268125855,
829
+ "grad_norm": 14.738003730773926,
830
+ "learning_rate": 4.464584283325733e-05,
831
+ "loss": 0.6253,
832
+ "step": 2875
833
+ },
834
+ {
835
+ "epoch": 1.9835841313269493,
836
+ "grad_norm": 19.53965950012207,
837
+ "learning_rate": 4.4550843593251256e-05,
838
+ "loss": 0.5386,
839
+ "step": 2900
840
+ },
841
+ {
842
+ "epoch": 2.0,
843
+ "eval_accuracy": 0.8098327074086719,
844
+ "eval_f1_macro": 0.5180108004626454,
845
+ "eval_f1_micro": 0.8098327074086719,
846
+ "eval_f1_weighted": 0.8111086454170916,
847
+ "eval_loss": 0.5644355416297913,
848
+ "eval_precision_macro": 0.703611295965861,
849
+ "eval_precision_micro": 0.8098327074086719,
850
+ "eval_precision_weighted": 0.8679013857559181,
851
+ "eval_recall_macro": 0.5354131711427872,
852
+ "eval_recall_micro": 0.8098327074086719,
853
+ "eval_recall_weighted": 0.8098327074086719,
854
+ "eval_runtime": 19.2316,
855
+ "eval_samples_per_second": 152.302,
856
+ "eval_steps_per_second": 9.568,
857
+ "step": 2924
858
+ },
859
+ {
860
+ "epoch": 2.000683994528044,
861
+ "grad_norm": 21.892972946166992,
862
+ "learning_rate": 4.445584435324517e-05,
863
+ "loss": 0.6068,
864
+ "step": 2925
865
+ },
866
+ {
867
+ "epoch": 2.017783857729138,
868
+ "grad_norm": 21.534278869628906,
869
+ "learning_rate": 4.4360845113239096e-05,
870
+ "loss": 0.5734,
871
+ "step": 2950
872
+ },
873
+ {
874
+ "epoch": 2.0348837209302326,
875
+ "grad_norm": 5.837319850921631,
876
+ "learning_rate": 4.426584587323301e-05,
877
+ "loss": 0.6038,
878
+ "step": 2975
879
+ },
880
+ {
881
+ "epoch": 2.0519835841313268,
882
+ "grad_norm": 9.711400985717773,
883
+ "learning_rate": 4.417084663322694e-05,
884
+ "loss": 0.5024,
885
+ "step": 3000
886
+ },
887
+ {
888
+ "epoch": 2.0690834473324213,
889
+ "grad_norm": 6.591183185577393,
890
+ "learning_rate": 4.4075847393220854e-05,
891
+ "loss": 0.6356,
892
+ "step": 3025
893
+ },
894
+ {
895
+ "epoch": 2.086183310533516,
896
+ "grad_norm": 6.523811340332031,
897
+ "learning_rate": 4.398084815321478e-05,
898
+ "loss": 0.6886,
899
+ "step": 3050
900
+ },
901
+ {
902
+ "epoch": 2.10328317373461,
903
+ "grad_norm": 14.16163158416748,
904
+ "learning_rate": 4.38858489132087e-05,
905
+ "loss": 0.4388,
906
+ "step": 3075
907
+ },
908
+ {
909
+ "epoch": 2.1203830369357046,
910
+ "grad_norm": 17.01984214782715,
911
+ "learning_rate": 4.379084967320262e-05,
912
+ "loss": 0.5274,
913
+ "step": 3100
914
+ },
915
+ {
916
+ "epoch": 2.1374829001367988,
917
+ "grad_norm": 14.736923217773438,
918
+ "learning_rate": 4.369585043319654e-05,
919
+ "loss": 0.5568,
920
+ "step": 3125
921
+ },
922
+ {
923
+ "epoch": 2.1545827633378933,
924
+ "grad_norm": 10.865409851074219,
925
+ "learning_rate": 4.360085119319046e-05,
926
+ "loss": 0.5113,
927
+ "step": 3150
928
+ },
929
+ {
930
+ "epoch": 2.1716826265389875,
931
+ "grad_norm": 22.429012298583984,
932
+ "learning_rate": 4.3505851953184376e-05,
933
+ "loss": 0.6578,
934
+ "step": 3175
935
+ },
936
+ {
937
+ "epoch": 2.188782489740082,
938
+ "grad_norm": 24.677793502807617,
939
+ "learning_rate": 4.34108527131783e-05,
940
+ "loss": 0.7861,
941
+ "step": 3200
942
+ },
943
+ {
944
+ "epoch": 2.2058823529411766,
945
+ "grad_norm": 10.499431610107422,
946
+ "learning_rate": 4.3315853473172216e-05,
947
+ "loss": 0.5801,
948
+ "step": 3225
949
+ },
950
+ {
951
+ "epoch": 2.2229822161422708,
952
+ "grad_norm": 3.640758514404297,
953
+ "learning_rate": 4.322085423316613e-05,
954
+ "loss": 0.6225,
955
+ "step": 3250
956
+ },
957
+ {
958
+ "epoch": 2.2400820793433653,
959
+ "grad_norm": 2.9993128776550293,
960
+ "learning_rate": 4.312585499316006e-05,
961
+ "loss": 0.5234,
962
+ "step": 3275
963
+ },
964
+ {
965
+ "epoch": 2.2571819425444595,
966
+ "grad_norm": 31.736900329589844,
967
+ "learning_rate": 4.3030855753153974e-05,
968
+ "loss": 0.6043,
969
+ "step": 3300
970
+ },
971
+ {
972
+ "epoch": 2.274281805745554,
973
+ "grad_norm": 24.770673751831055,
974
+ "learning_rate": 4.29358565131479e-05,
975
+ "loss": 0.468,
976
+ "step": 3325
977
+ },
978
+ {
979
+ "epoch": 2.2913816689466486,
980
+ "grad_norm": 16.497438430786133,
981
+ "learning_rate": 4.2840857273141814e-05,
982
+ "loss": 0.6247,
983
+ "step": 3350
984
+ },
985
+ {
986
+ "epoch": 2.3084815321477428,
987
+ "grad_norm": 11.79759693145752,
988
+ "learning_rate": 4.274585803313574e-05,
989
+ "loss": 0.4275,
990
+ "step": 3375
991
+ },
992
+ {
993
+ "epoch": 2.3255813953488373,
994
+ "grad_norm": 7.504731178283691,
995
+ "learning_rate": 4.2650858793129655e-05,
996
+ "loss": 0.5987,
997
+ "step": 3400
998
+ },
999
+ {
1000
+ "epoch": 2.3426812585499315,
1001
+ "grad_norm": 9.874656677246094,
1002
+ "learning_rate": 4.2559659522723825e-05,
1003
+ "loss": 0.3649,
1004
+ "step": 3425
1005
+ },
1006
+ {
1007
+ "epoch": 2.359781121751026,
1008
+ "grad_norm": 4.051993370056152,
1009
+ "learning_rate": 4.246466028271774e-05,
1010
+ "loss": 0.6407,
1011
+ "step": 3450
1012
+ },
1013
+ {
1014
+ "epoch": 2.37688098495212,
1015
+ "grad_norm": 25.524431228637695,
1016
+ "learning_rate": 4.236966104271166e-05,
1017
+ "loss": 0.6522,
1018
+ "step": 3475
1019
+ },
1020
+ {
1021
+ "epoch": 2.3939808481532148,
1022
+ "grad_norm": 10.878904342651367,
1023
+ "learning_rate": 4.2274661802705576e-05,
1024
+ "loss": 0.568,
1025
+ "step": 3500
1026
+ },
1027
+ {
1028
+ "epoch": 2.4110807113543093,
1029
+ "grad_norm": 26.06123924255371,
1030
+ "learning_rate": 4.21796625626995e-05,
1031
+ "loss": 0.4338,
1032
+ "step": 3525
1033
+ },
1034
+ {
1035
+ "epoch": 2.4281805745554035,
1036
+ "grad_norm": 9.708687782287598,
1037
+ "learning_rate": 4.2084663322693416e-05,
1038
+ "loss": 0.5463,
1039
+ "step": 3550
1040
+ },
1041
+ {
1042
+ "epoch": 2.445280437756498,
1043
+ "grad_norm": 25.002485275268555,
1044
+ "learning_rate": 4.198966408268734e-05,
1045
+ "loss": 0.4026,
1046
+ "step": 3575
1047
+ },
1048
+ {
1049
+ "epoch": 2.462380300957592,
1050
+ "grad_norm": 27.914440155029297,
1051
+ "learning_rate": 4.189466484268126e-05,
1052
+ "loss": 0.4373,
1053
+ "step": 3600
1054
+ },
1055
+ {
1056
+ "epoch": 2.4794801641586868,
1057
+ "grad_norm": 16.424388885498047,
1058
+ "learning_rate": 4.179966560267518e-05,
1059
+ "loss": 0.6144,
1060
+ "step": 3625
1061
+ },
1062
+ {
1063
+ "epoch": 2.496580027359781,
1064
+ "grad_norm": 18.099689483642578,
1065
+ "learning_rate": 4.17046663626691e-05,
1066
+ "loss": 0.4678,
1067
+ "step": 3650
1068
+ },
1069
+ {
1070
+ "epoch": 2.5136798905608755,
1071
+ "grad_norm": 7.258431434631348,
1072
+ "learning_rate": 4.160966712266302e-05,
1073
+ "loss": 0.5745,
1074
+ "step": 3675
1075
+ },
1076
+ {
1077
+ "epoch": 2.53077975376197,
1078
+ "grad_norm": 12.761260986328125,
1079
+ "learning_rate": 4.151466788265694e-05,
1080
+ "loss": 0.564,
1081
+ "step": 3700
1082
+ },
1083
+ {
1084
+ "epoch": 2.547879616963064,
1085
+ "grad_norm": 10.828967094421387,
1086
+ "learning_rate": 4.141966864265086e-05,
1087
+ "loss": 0.5247,
1088
+ "step": 3725
1089
+ },
1090
+ {
1091
+ "epoch": 2.5649794801641588,
1092
+ "grad_norm": 8.467166900634766,
1093
+ "learning_rate": 4.1324669402644785e-05,
1094
+ "loss": 0.6447,
1095
+ "step": 3750
1096
+ },
1097
+ {
1098
+ "epoch": 2.582079343365253,
1099
+ "grad_norm": 5.6609883308410645,
1100
+ "learning_rate": 4.12296701626387e-05,
1101
+ "loss": 0.4998,
1102
+ "step": 3775
1103
+ },
1104
+ {
1105
+ "epoch": 2.5991792065663475,
1106
+ "grad_norm": 8.889337539672852,
1107
+ "learning_rate": 4.1134670922632626e-05,
1108
+ "loss": 0.6064,
1109
+ "step": 3800
1110
+ },
1111
+ {
1112
+ "epoch": 2.616279069767442,
1113
+ "grad_norm": 10.798035621643066,
1114
+ "learning_rate": 4.103967168262654e-05,
1115
+ "loss": 0.4447,
1116
+ "step": 3825
1117
+ },
1118
+ {
1119
+ "epoch": 2.633378932968536,
1120
+ "grad_norm": 5.452834129333496,
1121
+ "learning_rate": 4.0944672442620466e-05,
1122
+ "loss": 0.5188,
1123
+ "step": 3850
1124
+ },
1125
+ {
1126
+ "epoch": 2.650478796169631,
1127
+ "grad_norm": 21.596166610717773,
1128
+ "learning_rate": 4.084967320261438e-05,
1129
+ "loss": 0.486,
1130
+ "step": 3875
1131
+ },
1132
+ {
1133
+ "epoch": 2.667578659370725,
1134
+ "grad_norm": 27.14288330078125,
1135
+ "learning_rate": 4.07546739626083e-05,
1136
+ "loss": 0.586,
1137
+ "step": 3900
1138
+ },
1139
+ {
1140
+ "epoch": 2.6846785225718195,
1141
+ "grad_norm": 6.16865873336792,
1142
+ "learning_rate": 4.065967472260222e-05,
1143
+ "loss": 0.5886,
1144
+ "step": 3925
1145
+ },
1146
+ {
1147
+ "epoch": 2.701778385772914,
1148
+ "grad_norm": 10.905616760253906,
1149
+ "learning_rate": 4.056467548259614e-05,
1150
+ "loss": 0.5854,
1151
+ "step": 3950
1152
+ },
1153
+ {
1154
+ "epoch": 2.718878248974008,
1155
+ "grad_norm": 23.615705490112305,
1156
+ "learning_rate": 4.046967624259006e-05,
1157
+ "loss": 0.5838,
1158
+ "step": 3975
1159
+ },
1160
+ {
1161
+ "epoch": 2.7359781121751023,
1162
+ "grad_norm": 20.192031860351562,
1163
+ "learning_rate": 4.037467700258398e-05,
1164
+ "loss": 0.4683,
1165
+ "step": 4000
1166
+ },
1167
+ {
1168
+ "epoch": 2.753077975376197,
1169
+ "grad_norm": 4.011788845062256,
1170
+ "learning_rate": 4.02796777625779e-05,
1171
+ "loss": 0.4985,
1172
+ "step": 4025
1173
+ },
1174
+ {
1175
+ "epoch": 2.7701778385772915,
1176
+ "grad_norm": 4.153777122497559,
1177
+ "learning_rate": 4.018467852257182e-05,
1178
+ "loss": 0.654,
1179
+ "step": 4050
1180
+ },
1181
+ {
1182
+ "epoch": 2.7872777017783856,
1183
+ "grad_norm": 7.651889324188232,
1184
+ "learning_rate": 4.008967928256574e-05,
1185
+ "loss": 0.5508,
1186
+ "step": 4075
1187
+ },
1188
+ {
1189
+ "epoch": 2.80437756497948,
1190
+ "grad_norm": 4.187475681304932,
1191
+ "learning_rate": 3.999468004255966e-05,
1192
+ "loss": 0.5831,
1193
+ "step": 4100
1194
+ },
1195
+ {
1196
+ "epoch": 2.8214774281805743,
1197
+ "grad_norm": 15.8203763961792,
1198
+ "learning_rate": 3.989968080255358e-05,
1199
+ "loss": 0.5752,
1200
+ "step": 4125
1201
+ },
1202
+ {
1203
+ "epoch": 2.838577291381669,
1204
+ "grad_norm": 3.2058730125427246,
1205
+ "learning_rate": 3.98046815625475e-05,
1206
+ "loss": 0.5395,
1207
+ "step": 4150
1208
+ },
1209
+ {
1210
+ "epoch": 2.8556771545827635,
1211
+ "grad_norm": 20.272655487060547,
1212
+ "learning_rate": 3.970968232254142e-05,
1213
+ "loss": 0.6421,
1214
+ "step": 4175
1215
+ },
1216
+ {
1217
+ "epoch": 2.8727770177838576,
1218
+ "grad_norm": 12.041251182556152,
1219
+ "learning_rate": 3.961468308253534e-05,
1220
+ "loss": 0.5199,
1221
+ "step": 4200
1222
+ },
1223
+ {
1224
+ "epoch": 2.889876880984952,
1225
+ "grad_norm": 8.457215309143066,
1226
+ "learning_rate": 3.951968384252926e-05,
1227
+ "loss": 0.3992,
1228
+ "step": 4225
1229
+ },
1230
+ {
1231
+ "epoch": 2.9069767441860463,
1232
+ "grad_norm": 7.0436787605285645,
1233
+ "learning_rate": 3.9424684602523184e-05,
1234
+ "loss": 0.4758,
1235
+ "step": 4250
1236
+ },
1237
+ {
1238
+ "epoch": 2.924076607387141,
1239
+ "grad_norm": 14.91028881072998,
1240
+ "learning_rate": 3.93296853625171e-05,
1241
+ "loss": 0.565,
1242
+ "step": 4275
1243
+ },
1244
+ {
1245
+ "epoch": 2.9411764705882355,
1246
+ "grad_norm": 6.1303229331970215,
1247
+ "learning_rate": 3.9234686122511024e-05,
1248
+ "loss": 0.4658,
1249
+ "step": 4300
1250
+ },
1251
+ {
1252
+ "epoch": 2.9582763337893296,
1253
+ "grad_norm": 3.7947280406951904,
1254
+ "learning_rate": 3.913968688250494e-05,
1255
+ "loss": 0.4188,
1256
+ "step": 4325
1257
+ },
1258
+ {
1259
+ "epoch": 2.975376196990424,
1260
+ "grad_norm": 6.020320415496826,
1261
+ "learning_rate": 3.9044687642498865e-05,
1262
+ "loss": 0.5847,
1263
+ "step": 4350
1264
+ },
1265
+ {
1266
+ "epoch": 2.9924760601915183,
1267
+ "grad_norm": 3.9531519412994385,
1268
+ "learning_rate": 3.894968840249278e-05,
1269
+ "loss": 0.5847,
1270
+ "step": 4375
1271
+ },
1272
+ {
1273
+ "epoch": 3.0,
1274
+ "eval_accuracy": 0.9061113007852509,
1275
+ "eval_f1_macro": 0.7552413664079601,
1276
+ "eval_f1_micro": 0.9061113007852509,
1277
+ "eval_f1_weighted": 0.9035670983782715,
1278
+ "eval_loss": 0.2802155911922455,
1279
+ "eval_precision_macro": 0.8498221458985794,
1280
+ "eval_precision_micro": 0.9061113007852509,
1281
+ "eval_precision_weighted": 0.9095288940815534,
1282
+ "eval_recall_macro": 0.7266443770545475,
1283
+ "eval_recall_micro": 0.9061113007852509,
1284
+ "eval_recall_weighted": 0.9061113007852509,
1285
+ "eval_runtime": 19.2634,
1286
+ "eval_samples_per_second": 152.05,
1287
+ "eval_steps_per_second": 9.552,
1288
+ "step": 4386
1289
+ }
1290
+ ],
1291
+ "logging_steps": 25,
1292
+ "max_steps": 14620,
1293
+ "num_input_tokens_seen": 0,
1294
+ "num_train_epochs": 10,
1295
+ "save_steps": 500,
1296
+ "total_flos": 1.1417490381074596e+18,
1297
+ "train_batch_size": 8,
1298
+ "trial_name": null,
1299
+ "trial_params": null
1300
+ }
checkpoint-4386/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:48cd7c21ab92767e5577a72d05fa0d7d8e7f0c4b90a01fc872477c42845783d6
3
+ size 5048
config.json ADDED
@@ -0,0 +1,98 @@
1
+ {
2
+ "_name_or_path": "microsoft/swinv2-tiny-patch4-window8-256",
3
+ "_num_labels": 17,
4
+ "architectures": [
5
+ "Swinv2ForImageClassification"
6
+ ],
7
+ "attention_probs_dropout_prob": 0.0,
8
+ "depths": [
9
+ 2,
10
+ 2,
11
+ 6,
12
+ 2
13
+ ],
14
+ "drop_path_rate": 0.1,
15
+ "embed_dim": 96,
16
+ "encoder_stride": 32,
17
+ "hidden_act": "gelu",
18
+ "hidden_dropout_prob": 0.0,
19
+ "hidden_size": 768,
20
+ "id2label": {
21
+ "0": "AadhaarBack",
22
+ "1": "AadhaarBackMasked",
23
+ "2": "AadhaarFront",
24
+ "3": "AadhaarFrontMasked",
25
+ "4": "AadhaarRegular",
26
+ "5": "AadhaarRegularMasked",
27
+ "6": "AadhaarSelfGenerated",
28
+ "7": "DrivingLicenseNew",
29
+ "8": "DrivingLicenseOld",
30
+ "9": "MultipleOVDs",
31
+ "10": "PanCard",
32
+ "11": "PassportFirst",
33
+ "12": "PassportLast",
34
+ "13": "PassportRegular",
35
+ "14": "VoterCardBack",
36
+ "15": "VoterCardFront",
37
+ "16": "VoterCardRegular"
38
+ },
39
+ "image_size": 256,
40
+ "initializer_range": 0.02,
41
+ "label2id": {
42
+ "AadhaarBack": 0,
43
+ "AadhaarBackMasked": 1,
44
+ "AadhaarFront": 2,
45
+ "AadhaarFrontMasked": 3,
46
+ "AadhaarRegular": 4,
47
+ "AadhaarRegularMasked": 5,
48
+ "AadhaarSelfGenerated": 6,
49
+ "DrivingLicenseNew": 7,
50
+ "DrivingLicenseOld": 8,
51
+ "MultipleOVDs": 9,
52
+ "PanCard": 10,
53
+ "PassportFirst": 11,
54
+ "PassportLast": 12,
55
+ "PassportRegular": 13,
56
+ "VoterCardBack": 14,
57
+ "VoterCardFront": 15,
58
+ "VoterCardRegular": 16
59
+ },
60
+ "layer_norm_eps": 1e-05,
61
+ "mlp_ratio": 4.0,
62
+ "model_type": "swinv2",
63
+ "num_channels": 3,
64
+ "num_heads": [
65
+ 3,
66
+ 6,
67
+ 12,
68
+ 24
69
+ ],
70
+ "num_layers": 4,
71
+ "out_features": [
72
+ "stage4"
73
+ ],
74
+ "out_indices": [
75
+ 4
76
+ ],
77
+ "patch_size": 4,
78
+ "path_norm": true,
79
+ "pretrained_window_sizes": [
80
+ 0,
81
+ 0,
82
+ 0,
83
+ 0
84
+ ],
85
+ "problem_type": "single_label_classification",
86
+ "qkv_bias": true,
87
+ "stage_names": [
88
+ "stem",
89
+ "stage1",
90
+ "stage2",
91
+ "stage3",
92
+ "stage4"
93
+ ],
94
+ "torch_dtype": "float32",
95
+ "transformers_version": "4.40.1",
96
+ "use_absolute_embeddings": false,
97
+ "window_size": 8
98
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:27b710e007c37457688de5a0f4b11dee19fa7b8c2f1a4371d8ddca2ddc5484a1
3
+ size 110396292
preprocessor_config.json ADDED
@@ -0,0 +1,36 @@
1
+ {
2
+ "_valid_processor_keys": [
3
+ "images",
4
+ "do_resize",
5
+ "size",
6
+ "resample",
7
+ "do_rescale",
8
+ "rescale_factor",
9
+ "do_normalize",
10
+ "image_mean",
11
+ "image_std",
12
+ "return_tensors",
13
+ "data_format",
14
+ "input_data_format"
15
+ ],
16
+ "do_normalize": true,
17
+ "do_rescale": true,
18
+ "do_resize": true,
19
+ "image_mean": [
20
+ 0.485,
21
+ 0.456,
22
+ 0.406
23
+ ],
24
+ "image_processor_type": "ViTImageProcessor",
25
+ "image_std": [
26
+ 0.229,
27
+ 0.224,
28
+ 0.225
29
+ ],
30
+ "resample": 3,
31
+ "rescale_factor": 0.00392156862745098,
32
+ "size": {
33
+ "height": 256,
34
+ "width": 256
35
+ }
36
+ }
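The preprocessor config above resizes to 256x256 with `resample: 3` (PIL bicubic), rescales pixel values by 1/255, and normalizes with the ImageNet mean/std. A rough torchvision equivalent, for illustration only (the actual code path goes through `ViTImageProcessor`):

```python
# Illustrative torchvision equivalent of the preprocessing JSON above.
# Not the exact transformers implementation, just the same three steps.
from PIL import Image
from torchvision import transforms

preprocess = transforms.Compose([
    transforms.Resize((256, 256), interpolation=transforms.InterpolationMode.BICUBIC),
    transforms.ToTensor(),  # scales pixel values by 1/255 (rescale_factor)
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

pixel_values = preprocess(Image.open("sample_document.jpg").convert("RGB")).unsqueeze(0)
print(pixel_values.shape)  # torch.Size([1, 3, 256, 256])
```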
runs/May03_08-08-24_60f4804cf903/events.out.tfevents.1714723705.60f4804cf903.11751.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4b41d1de40a6fa9c92e76ec03187a32418086de240849f569b9cbe417880454f
- size 54313
+ oid sha256:f14fd4fc3187fa1cd5c26c75388869d0ed5d7774622b3639819f5be68e5ddce8
+ size 85229
runs/May03_08-08-24_60f4804cf903/events.out.tfevents.1714724719.60f4804cf903.11751.1 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aec03708c58f4d63eb36fabef7e2a30b20c2f19588dd41d12bf2d6f394448acb
3
+ size 921
training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:48cd7c21ab92767e5577a72d05fa0d7d8e7f0c4b90a01fc872477c42845783d6
3
+ size 5048
training_params.json ADDED
@@ -0,0 +1,27 @@
1
+ {
2
+ "data_path": "autotrain-swinv2-tiny-patch4-window8-256/autotrain-data",
3
+ "model": "microsoft/swinv2-tiny-patch4-window8-256",
4
+ "username": "Kushagra07",
5
+ "lr": 5e-05,
6
+ "epochs": 10,
7
+ "batch_size": 8,
8
+ "warmup_ratio": 0.1,
9
+ "gradient_accumulation": 1,
10
+ "optimizer": "adamw_torch",
11
+ "scheduler": "linear",
12
+ "weight_decay": 0.0,
13
+ "max_grad_norm": 1.0,
14
+ "seed": 42,
15
+ "train_split": "train",
16
+ "valid_split": "validation",
17
+ "logging_steps": -1,
18
+ "project_name": "autotrain-swinv2-tiny-patch4-window8-256",
19
+ "auto_find_batch_size": true,
20
+ "mixed_precision": "fp16",
21
+ "save_total_limit": 1,
22
+ "push_to_hub": true,
23
+ "evaluation_strategy": "epoch",
24
+ "image_column": "autotrain_image",
25
+ "target_column": "autotrain_label",
26
+ "log": "tensorboard"
27
+ }
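The parameters above tie back to the trainer state earlier in this commit: 10 epochs at 1462 optimizer steps per epoch give the 14620 `max_steps` recorded there, and a `warmup_ratio` of 0.1 implies roughly 1462 warmup steps, which is about where the logged learning rate peaks near the 5e-05 target before decaying. A small arithmetic sketch; treating warmup as `int(max_steps * warmup_ratio)` is the usual linear-warmup convention and an assumption about how AutoTrain configures its scheduler:

```python
# Back-of-the-envelope link between training_params.json and trainer_state.json.
max_steps = 14_620      # trainer_state.json (10 epochs * 1462 steps per epoch)
warmup_ratio = 0.1      # training_params.json
peak_lr = 5e-05         # "lr" in training_params.json

warmup_steps = int(max_steps * warmup_ratio)
print(warmup_steps)     # 1462: the logged learning_rate climbs toward peak_lr up to
                        # roughly this step, then decays linearly toward zero
```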