add model
- config.json +9 -8
- pytorch_model.bin +2 -2
config.json
CHANGED
@@ -1,24 +1,25 @@
 {
-  "_name_or_path": "
+  "_name_or_path": "NhatPham/vit-base-patch16-224-recylce-ft",
   "architectures": [
     "ViTForImageClassification"
   ],
   "attention_probs_dropout_prob": 0.0,
+  "encoder_stride": 16,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.0,
   "hidden_size": 768,
   "id2label": {
-    "0": "
-    "1": "
-    "2": "
+    "0": "Object",
+    "1": "Recycle",
+    "2": "Non-Recycle "
   },
   "image_size": 224,
   "initializer_range": 0.02,
   "intermediate_size": 3072,
   "label2id": {
-    "
-    "
-    "
+    "Non-Recycle ": 2,
+    "Object": 0,
+    "Recycle": 1
   },
   "layer_norm_eps": 1e-12,
   "model_type": "vit",
@@ -28,5 +29,5 @@
   "patch_size": 16,
   "qkv_bias": true,
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.19.2"
 }
pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:43753b62f79b09a3f760e437778302b9b9f7440e43b431f3b8e7a0a21483bc97
+size 343267249
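The updated config.json registers a ViTForImageClassification head with three classes (Object, Recycle, Non-Recycle). A minimal sketch of loading this checkpoint for inference, assuming the "_name_or_path" value above, NhatPham/vit-base-patch16-224-recylce-ft, is the published Hub repository id and that the pytorch_model.bin weights are downloadable:

# Minimal sketch: run the ViT classifier described by this config on one image.
# The model id is taken from "_name_or_path" in config.json (assumed to be the Hub id);
# label names come from the id2label mapping added in this commit.
from PIL import Image
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="NhatPham/vit-base-patch16-224-recylce-ft",  # assumed Hub repository id
)

image = Image.open("example.jpg")  # any RGB image; the processor resizes to 224x224
for prediction in classifier(image):
    print(prediction["label"], round(prediction["score"], 3))

The pipeline applies the image processor bundled with the checkpoint, so no manual resizing or normalization is needed.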