selvaa committed
Commit 87d201d
1 Parent(s): de92830

Model save

README.md CHANGED
@@ -2,8 +2,6 @@
  license: other
  base_model: nvidia/segformer-b1-finetuned-cityscapes-1024-1024
  tags:
- - vision
- - image-segmentation
  - generated_from_trainer
  model-index:
  - name: segformer-b1-finetuned-cityscapes-1024-1024-latestt
@@ -15,7 +13,7 @@ should probably proofread and complete it, then remove this comment. -->
 
  # segformer-b1-finetuned-cityscapes-1024-1024-latestt
 
- This model is a fine-tuned version of [nvidia/segformer-b1-finetuned-cityscapes-1024-1024](https://huggingface.co/nvidia/segformer-b1-finetuned-cityscapes-1024-1024) on the selvaa/straighter-only-final dataset.
+ This model is a fine-tuned version of [nvidia/segformer-b1-finetuned-cityscapes-1024-1024](https://huggingface.co/nvidia/segformer-b1-finetuned-cityscapes-1024-1024) on an unknown dataset.
  It achieves the following results on the evaluation set:
  - Loss: 0.0311
  - Mean Iou: 0.9368
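
For quick verification, here is a minimal inference sketch against the checkpoint added in this commit. It is only a sketch under stated assumptions: the local `model/` path mirrors this commit's layout, the input image filename is a placeholder, and the image processor is reused from the base checkpoint because this commit adds no preprocessor config.

```python
# Minimal inference sketch (assumptions: local "model/" dir from this commit,
# placeholder image path; processor reused from the base checkpoint because
# no preprocessor_config.json is included here).
from PIL import Image
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

processor = SegformerImageProcessor.from_pretrained(
    "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
)
model = SegformerForSemanticSegmentation.from_pretrained("model")  # config.json + model.safetensors

image = Image.open("example.jpg")  # hypothetical input image
inputs = processor(images=image, return_tensors="pt")
logits = model(**inputs).logits    # shape: (1, 4, H/4, W/4)
pred = logits.argmax(dim=1)        # per-pixel class ids: 0=default, 1=pipe, 2=floor, 3=background
```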
model/config.json ADDED
@@ -0,0 +1,82 @@
+{
+  "_name_or_path": "nvidia/segformer-b1-finetuned-cityscapes-1024-1024",
+  "architectures": [
+    "SegformerForSemanticSegmentation"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "classifier_dropout_prob": 0.1,
+  "decoder_hidden_size": 256,
+  "depths": [
+    2,
+    2,
+    2,
+    2
+  ],
+  "downsampling_rates": [
+    1,
+    4,
+    8,
+    16
+  ],
+  "drop_path_rate": 0.1,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.0,
+  "hidden_sizes": [
+    64,
+    128,
+    320,
+    512
+  ],
+  "id2label": {
+    "0": "default",
+    "1": "pipe",
+    "2": "floor",
+    "3": "background"
+  },
+  "image_size": 224,
+  "initializer_range": 0.02,
+  "label2id": {
+    "background": 3,
+    "default": 0,
+    "floor": 2,
+    "pipe": 1
+  },
+  "layer_norm_eps": 1e-06,
+  "mlp_ratios": [
+    4,
+    4,
+    4,
+    4
+  ],
+  "model_type": "segformer",
+  "num_attention_heads": [
+    1,
+    2,
+    5,
+    8
+  ],
+  "num_channels": 3,
+  "num_encoder_blocks": 4,
+  "patch_sizes": [
+    7,
+    3,
+    3,
+    3
+  ],
+  "reshape_last_stage": true,
+  "semantic_loss_ignore_index": 255,
+  "sr_ratios": [
+    8,
+    4,
+    2,
+    1
+  ],
+  "strides": [
+    4,
+    2,
+    2,
+    2
+  ],
+  "torch_dtype": "float32",
+  "transformers_version": "4.35.2"
+}
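
A small sketch of reading this config back with `transformers`; the local path `model` is the only assumption, and the label mapping comes from the file above.

```python
# Read the config added above (assumption: it lives at model/config.json).
from transformers import SegformerConfig

config = SegformerConfig.from_pretrained("model")
print(config.num_labels)         # 4
print(config.id2label[1])        # "pipe" (id2label keys are converted to ints on load)
print(config.label2id["floor"])  # 2
```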
model/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f05ab0320141a26e95b78d131ef63b083d7dd79fed34de8c30033abf9a869565
+size 54739432
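
The weights are stored as a Git LFS pointer, so they need `git lfs pull` before use. The sketch below, with the local path as its only assumption, loads the tensor dict directly to sanity-check the 4-class decode head.

```python
# Inspect the LFS-tracked weights (assumption: file pulled to model/model.safetensors).
from safetensors.torch import load_file

state_dict = load_file("model/model.safetensors")
print(len(state_dict), "tensors")
for name, tensor in state_dict.items():
    if "classifier" in name:  # decode-head classifier should have 4 output channels
        print(name, tuple(tensor.shape))
```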
model/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e621e314b4d5997b9c11b85ff40a1100606a02d22e149572b69eba374b92640b
+size 4283
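
`training_args.bin` is the pickled `TrainingArguments` object that the `Trainer` saves alongside the model. A sketch for inspecting it, assuming the file has been pulled locally to `model/training_args.bin`:

```python
# Inspect the saved TrainingArguments (assumption: file pulled to model/training_args.bin).
# weights_only=False is required because this is a pickled Python object, not a tensor file.
import torch

args = torch.load("model/training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```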