chaodu001 committed
Commit 2ee57c6
1 Parent(s): 1c5deaf

Upload 5 files
segformer_b3_clothes/.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
segformer_b3_clothes/README.md ADDED
@@ -0,0 +1,112 @@
+ ---
+ license: mit
+ tags:
+ - vision
+ - image-segmentation
+ widget:
+ - src: >-
+     https://images.unsplash.com/photo-1643310325061-2beef64926a5?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxzZWFyY2h8Nnx8cmFjb29uc3xlbnwwfHwwfHw%3D&w=1000&q=80
+   example_title: Person
+ - src: >-
+     https://freerangestock.com/sample/139043/young-man-standing-and-leaning-on-car.jpg
+   example_title: Person
+ datasets:
+ - mattmdjaga/human_parsing_dataset
+ pipeline_tag: image-segmentation
+ ---
+ # SegFormer B3 fine-tuned for clothes segmentation
+
+ SegFormer model fine-tuned on the [ATR dataset](https://github.com/lemondan/HumanParsing-Dataset) for clothes segmentation; it can also be used for human segmentation.
+ On the Hugging Face Hub the dataset is available as "mattmdjaga/human_parsing_dataset".
+
+
+ **NEW** -
+ **[Training code](https://github.com/mattmdjaga/segformer_b2_clothes)**. It currently contains only the raw code with some comments; a Colab notebook version
+ and an accompanying blog post will follow to make it more approachable.
+
+ ```python
+ from transformers import SegformerImageProcessor, AutoModelForSemanticSegmentation
+ from PIL import Image
+ import requests
+ import matplotlib.pyplot as plt
+ import torch.nn as nn
+
+ # Load the image processor and the fine-tuned segmentation model.
+ processor = SegformerImageProcessor.from_pretrained("sayeed99/segformer_b3_clothes")
+ model = AutoModelForSemanticSegmentation.from_pretrained("sayeed99/segformer_b3_clothes")
+
+ url = "https://plus.unsplash.com/premium_photo-1673210886161-bfcc40f54d1f?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8cGVyc29uJTIwc3RhbmRpbmd8ZW58MHx8MHx8&w=1000&q=80"
+
+ image = Image.open(requests.get(url, stream=True).raw)
+ inputs = processor(images=image, return_tensors="pt")
+
+ outputs = model(**inputs)
+ logits = outputs.logits.cpu()
+
+ # The logits come out at 1/4 of the input resolution; upsample them back to the image size.
+ upsampled_logits = nn.functional.interpolate(
+     logits,
+     size=image.size[::-1],
+     mode="bilinear",
+     align_corners=False,
+ )
+
+ # Per-pixel class ids (see the label list below).
+ pred_seg = upsampled_logits.argmax(dim=1)[0]
+ plt.imshow(pred_seg)
+ ```
+
+ Labels: 0: "Background", 1: "Hat", 2: "Hair", 3: "Sunglasses", 4: "Upper-clothes", 5: "Skirt", 6: "Pants", 7: "Dress", 8: "Belt", 9: "Left-shoe", 10: "Right-shoe", 11: "Face", 12: "Left-leg", 13: "Right-leg", 14: "Left-arm", 15: "Right-arm", 16: "Bag", 17: "Scarf"
+
+ ### Evaluation
+
+ | Label Index | Label Name | Category Accuracy | Category IoU |
+ |:-------------:|:----------------:|:-----------------:|:------------:|
+ | 0 | Background | 0.99 | 0.99 |
+ | 1 | Hat | 0.73 | 0.68 |
+ | 2 | Hair | 0.91 | 0.82 |
+ | 3 | Sunglasses | 0.73 | 0.63 |
+ | 4 | Upper-clothes | 0.87 | 0.78 |
+ | 5 | Skirt | 0.76 | 0.65 |
+ | 6 | Pants | 0.90 | 0.84 |
+ | 7 | Dress | 0.74 | 0.55 |
+ | 8 | Belt | 0.35 | 0.30 |
+ | 9 | Left-shoe | 0.74 | 0.58 |
+ | 10 | Right-shoe | 0.75 | 0.60 |
+ | 11 | Face | 0.92 | 0.85 |
+ | 12 | Left-leg | 0.90 | 0.82 |
+ | 13 | Right-leg | 0.90 | 0.81 |
+ | 14 | Left-arm | 0.86 | 0.74 |
+ | 15 | Right-arm | 0.82 | 0.73 |
+ | 16 | Bag | 0.91 | 0.84 |
+ | 17 | Scarf | 0.63 | 0.29 |
+
+ Overall Evaluation Metrics:
+ - Evaluation Loss: 0.15
+ - Mean Accuracy: 0.80
+ - Mean IoU: 0.69
+
+ ### License
+
+ The license for this model can be found [here](https://github.com/NVlabs/SegFormer/blob/master/LICENSE).
+
+ ### BibTeX entry and citation info
+
+ ```bibtex
+ @article{DBLP:journals/corr/abs-2105-15203,
+   author     = {Enze Xie and
+                 Wenhai Wang and
+                 Zhiding Yu and
+                 Anima Anandkumar and
+                 Jose M. Alvarez and
+                 Ping Luo},
+   title      = {SegFormer: Simple and Efficient Design for Semantic Segmentation with
+                 Transformers},
+   journal    = {CoRR},
+   volume     = {abs/2105.15203},
+   year       = {2021},
+   url        = {https://arxiv.org/abs/2105.15203},
+   eprinttype = {arXiv},
+   eprint     = {2105.15203},
+   timestamp  = {Wed, 02 Jun 2021 11:46:42 +0200},
+   biburl     = {https://dblp.org/rec/journals/corr/abs-2105-15203.bib},
+   bibsource  = {dblp computer science bibliography, https://dblp.org}
+ }
+ ```
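
Note (not part of the uploaded README): the snippet above ends with a per-pixel class map (`pred_seg`). For a quick check of the uploaded checkpoint, the same model can also be driven through the `transformers` image-segmentation pipeline, which wraps the pre- and post-processing and returns one PIL mask per predicted label. This is a minimal sketch, assuming the `sayeed99/segformer_b3_clothes` repo id referenced in the README; adjust it to wherever these files are actually hosted.

```python
from transformers import pipeline

# Image-segmentation pipeline around the checkpoint (repo id taken from the README snippet).
segmenter = pipeline("image-segmentation", model="sayeed99/segformer_b3_clothes")

# One of the example images from the model card's widget section.
url = "https://freerangestock.com/sample/139043/young-man-standing-and-leaning-on-car.jpg"

# For semantic segmentation the pipeline returns one entry per predicted label,
# each carrying a label name (e.g. "Upper-clothes") and a PIL mask sized like the input.
for result in segmenter(url):
    print(result["label"], result["mask"].size)
    if result["label"] == "Upper-clothes":
        result["mask"].save("upper_clothes_mask.png")
```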
segformer_b3_clothes/config.json ADDED
@@ -0,0 +1,110 @@
+ {
+   "_name_or_path": "nvidia/mit-b3",
+   "architectures": [
+     "SegformerForSemanticSegmentation"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "classifier_dropout_prob": 0.1,
+   "decoder_hidden_size": 768,
+   "depths": [
+     3,
+     4,
+     18,
+     3
+   ],
+   "downsampling_rates": [
+     1,
+     4,
+     8,
+     16
+   ],
+   "drop_path_rate": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_sizes": [
+     64,
+     128,
+     320,
+     512
+   ],
+   "id2label": {
+     "0": "Background",
+     "1": "Hat",
+     "10": "Right-shoe",
+     "11": "Face",
+     "12": "Left-leg",
+     "13": "Right-leg",
+     "14": "Left-arm",
+     "15": "Right-arm",
+     "16": "Bag",
+     "17": "Scarf",
+     "2": "Hair",
+     "3": "Sunglasses",
+     "4": "Upper-clothes",
+     "5": "Skirt",
+     "6": "Pants",
+     "7": "Dress",
+     "8": "Belt",
+     "9": "Left-shoe"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "label2id": {
+     "Background": "0",
+     "Bag": "16",
+     "Belt": "8",
+     "Dress": "7",
+     "Face": "11",
+     "Hair": "2",
+     "Hat": "1",
+     "Left-arm": "14",
+     "Left-leg": "12",
+     "Left-shoe": "9",
+     "Pants": "6",
+     "Right-arm": "15",
+     "Right-leg": "13",
+     "Right-shoe": "10",
+     "Scarf": "17",
+     "Skirt": "5",
+     "Sunglasses": "3",
+     "Upper-clothes": "4"
+   },
+   "layer_norm_eps": 1e-06,
+   "mlp_ratios": [
+     4,
+     4,
+     4,
+     4
+   ],
+   "model_type": "segformer",
+   "num_attention_heads": [
+     1,
+     2,
+     5,
+     8
+   ],
+   "num_channels": 3,
+   "num_encoder_blocks": 4,
+   "patch_sizes": [
+     7,
+     3,
+     3,
+     3
+   ],
+   "reshape_last_stage": true,
+   "semantic_loss_ignore_index": 255,
+   "sr_ratios": [
+     8,
+     4,
+     2,
+     1
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.38.1"
+ }
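
The configuration above pins the MiT-B3 encoder (four stages with depths 3/4/18/3 and hidden sizes 64/128/320/512, plus a 768-channel decoder) and the 18-entry label map used in the README. A minimal sketch of inspecting these values programmatically, again assuming the repo id from the README:

```python
from transformers import SegformerConfig

# Load the configuration shipped with this checkpoint
# (point from_pretrained at a local copy of config.json if the files live elsewhere).
config = SegformerConfig.from_pretrained("sayeed99/segformer_b3_clothes")

# Four encoder stages of the MiT-B3 backbone: transformer blocks and channel widths per stage.
print(config.depths)               # [3, 4, 18, 3]
print(config.hidden_sizes)         # [64, 128, 320, 512]
print(config.decoder_hidden_size)  # 768

# 18-class label map used by the segmentation head.
print(config.num_labels)           # 18
print(config.id2label[4])          # Upper-clothes
```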
segformer_b3_clothes/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f70ae566c5773fb335796ebaa8acc924ac25eb97222c2b2967d44d2fc11568e6
+ size 189029000
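
The diff for `model.safetensors` shows only the Git LFS pointer (a SHA-256 oid and a size of roughly 189 MB); the weights themselves sit in LFS storage, which is what the `.gitattributes` rules above arrange. A hedged sketch of fetching and inspecting the actual weight file, assuming it is hosted under the repo id used in the README:

```python
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

# Resolve the LFS pointer to a local copy of the real ~189 MB file.
path = hf_hub_download(repo_id="sayeed99/segformer_b3_clothes", filename="model.safetensors")

# Load the raw tensors (name -> torch.Tensor) without instantiating the model.
state_dict = load_file(path)
print(len(state_dict), "tensors")
for name in list(state_dict)[:5]:
    print(name, tuple(state_dict[name].shape))
```

For normal use, the README's `AutoModelForSemanticSegmentation.from_pretrained(...)` call performs this download and loading automatically.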
segformer_b3_clothes/preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "do_normalize": true,
+   "do_reduce_labels": false,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.485,
+     0.456,
+     0.406
+   ],
+   "image_processor_type": "SegformerImageProcessor",
+   "image_std": [
+     0.229,
+     0.224,
+     0.225
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 512,
+     "width": 512
+   }
+ }
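
Per this preprocessor config, the image processor resizes inputs to 512x512 with bilinear resampling (`resample: 2`), rescales pixel values by 1/255, and normalizes with the ImageNet mean/std. The sketch below is a rough, illustrative re-implementation of what the README's `processor(images=image, return_tensors="pt")` call does under this config; the real `SegformerImageProcessor` remains the supported path.

```python
import numpy as np
from PIL import Image

# ImageNet statistics from preprocessor_config.json.
IMAGE_MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32)
IMAGE_STD = np.array([0.229, 0.224, 0.225], dtype=np.float32)

def preprocess(image: Image.Image) -> np.ndarray:
    # do_resize: 512x512, resample=2 (bilinear).
    image = image.convert("RGB").resize((512, 512), resample=Image.BILINEAR)
    # do_rescale: rescale_factor = 1/255.
    pixels = np.asarray(image).astype(np.float32) / 255.0
    # do_normalize: subtract mean, divide by std.
    pixels = (pixels - IMAGE_MEAN) / IMAGE_STD
    # HWC -> NCHW with a batch dimension of 1.
    return pixels.transpose(2, 0, 1)[None]

batch = preprocess(Image.new("RGB", (640, 480)))
print(batch.shape)  # (1, 3, 512, 512)
```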