chaodu001 committed
Commit 82b4ab3 · verified · 1 Parent(s): 2ee57c6

Upload 16 files

segformer_b2_clothes/.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
segformer_b2_clothes/.gitignore ADDED
@@ -0,0 +1,2 @@
+ .ipynb_checkpoints
+ test.ipynb
segformer_b2_clothes/README.md ADDED
@@ -0,0 +1,105 @@
+ ---
+ license: mit
+ tags:
+ - vision
+ - image-segmentation
+ widget:
+ - src: https://images.unsplash.com/photo-1643310325061-2beef64926a5?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxzZWFyY2h8Nnx8cmFjb29uc3xlbnwwfHwwfHw%3D&w=1000&q=80
+   example_title: Person
+ - src: https://freerangestock.com/sample/139043/young-man-standing-and-leaning-on-car.jpg
+   example_title: Person
+ datasets:
+ - mattmdjaga/human_parsing_dataset
+ ---
+ # SegFormer B2 fine-tuned for clothes segmentation
+ 
+ SegFormer model fine-tuned on the [ATR dataset](https://github.com/lemondan/HumanParsing-Dataset) for clothes segmentation; it can also be used for general human segmentation.
+ On Hugging Face, the dataset is published as "mattmdjaga/human_parsing_dataset".
+ 
+ **[Training code](https://github.com/mattmdjaga/segformer_b2_clothes)**.
+ ```python
+ from transformers import SegformerImageProcessor, AutoModelForSemanticSegmentation
+ from PIL import Image
+ import requests
+ import matplotlib.pyplot as plt
+ import torch.nn as nn
+ 
+ processor = SegformerImageProcessor.from_pretrained("mattmdjaga/segformer_b2_clothes")
+ model = AutoModelForSemanticSegmentation.from_pretrained("mattmdjaga/segformer_b2_clothes")
+ 
+ url = "https://plus.unsplash.com/premium_photo-1673210886161-bfcc40f54d1f?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8cGVyc29uJTIwc3RhbmRpbmd8ZW58MHx8MHx8&w=1000&q=80"
+ 
+ image = Image.open(requests.get(url, stream=True).raw)
+ inputs = processor(images=image, return_tensors="pt")
+ 
+ outputs = model(**inputs)
+ logits = outputs.logits.cpu()
+ 
+ # upsample the low-resolution logits back to the input image size
+ upsampled_logits = nn.functional.interpolate(
+     logits,
+     size=image.size[::-1],
+     mode="bilinear",
+     align_corners=False,
+ )
+ 
+ pred_seg = upsampled_logits.argmax(dim=1)[0]
+ plt.imshow(pred_seg)
+ ```
+ 
+ Labels: 0: "Background", 1: "Hat", 2: "Hair", 3: "Sunglasses", 4: "Upper-clothes", 5: "Skirt", 6: "Pants", 7: "Dress", 8: "Belt", 9: "Left-shoe", 10: "Right-shoe", 11: "Face", 12: "Left-leg", 13: "Right-leg", 14: "Left-arm", 15: "Right-arm", 16: "Bag", 17: "Scarf"
+ 
+ ### Evaluation
+ 
+ | Label Index | Label Name | Category Accuracy | Category IoU |
+ |:-----------:|:-------------:|:-----------------:|:------------:|
+ | 0 | Background | 0.99 | 0.99 |
+ | 1 | Hat | 0.73 | 0.68 |
+ | 2 | Hair | 0.91 | 0.82 |
+ | 3 | Sunglasses | 0.73 | 0.63 |
+ | 4 | Upper-clothes | 0.87 | 0.78 |
+ | 5 | Skirt | 0.76 | 0.65 |
+ | 6 | Pants | 0.90 | 0.84 |
+ | 7 | Dress | 0.74 | 0.55 |
+ | 8 | Belt | 0.35 | 0.30 |
+ | 9 | Left-shoe | 0.74 | 0.58 |
+ | 10 | Right-shoe | 0.75 | 0.60 |
+ | 11 | Face | 0.92 | 0.85 |
+ | 12 | Left-leg | 0.90 | 0.82 |
+ | 13 | Right-leg | 0.90 | 0.81 |
+ | 14 | Left-arm | 0.86 | 0.74 |
+ | 15 | Right-arm | 0.82 | 0.73 |
+ | 16 | Bag | 0.91 | 0.84 |
+ | 17 | Scarf | 0.63 | 0.29 |
+ 
+ Overall evaluation metrics:
+ - Evaluation loss: 0.15
+ - Mean accuracy: 0.80
+ - Mean IoU: 0.69
+ 
+ ### License
+ 
+ The license for this model can be found [here](https://github.com/NVlabs/SegFormer/blob/master/LICENSE).
+ 
+ ### BibTeX entry and citation info
+ 
+ ```bibtex
+ @article{DBLP:journals/corr/abs-2105-15203,
+   author     = {Enze Xie and
+                 Wenhai Wang and
+                 Zhiding Yu and
+                 Anima Anandkumar and
+                 Jose M. Alvarez and
+                 Ping Luo},
+   title      = {SegFormer: Simple and Efficient Design for Semantic Segmentation with
+                 Transformers},
+   journal    = {CoRR},
+   volume     = {abs/2105.15203},
+   year       = {2021},
+   url        = {https://arxiv.org/abs/2105.15203},
+   eprinttype = {arXiv},
+   eprint     = {2105.15203},
+   timestamp  = {Wed, 02 Jun 2021 11:46:42 +0200},
+   biburl     = {https://dblp.org/rec/journals/corr/abs-2105-15203.bib},
+   bibsource  = {dblp computer science bibliography, https://dblp.org}
+ }
+ ```
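
Note: the README snippet ends with an integer class map in `pred_seg`; a common next step is slicing out a single garment. Below is a minimal sketch continuing from `pred_seg` above, not part of the upload — the class index 4 comes from the label list, and the output filename is illustrative.

```python
import numpy as np
from PIL import Image

# Build a binary mask for one class from the README's `pred_seg`
# (4 = "Upper-clothes" per the label list above).
mask = (pred_seg == 4).numpy().astype(np.uint8) * 255
Image.fromarray(mask).save("upper_clothes_mask.png")  # illustrative filename
```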
segformer_b2_clothes/config.json ADDED
@@ -0,0 +1,110 @@
+ {
+   "_name_or_path": "nvidia/mit-b2",
+   "architectures": [
+     "SegformerForSemanticSegmentation"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "classifier_dropout_prob": 0.1,
+   "decoder_hidden_size": 768,
+   "depths": [
+     3,
+     4,
+     6,
+     3
+   ],
+   "downsampling_rates": [
+     1,
+     4,
+     8,
+     16
+   ],
+   "drop_path_rate": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_sizes": [
+     64,
+     128,
+     320,
+     512
+   ],
+   "id2label": {
+     "0": "Background",
+     "1": "Hat",
+     "2": "Hair",
+     "3": "Sunglasses",
+     "4": "Upper-clothes",
+     "5": "Skirt",
+     "6": "Pants",
+     "7": "Dress",
+     "8": "Belt",
+     "9": "Left-shoe",
+     "10": "Right-shoe",
+     "11": "Face",
+     "12": "Left-leg",
+     "13": "Right-leg",
+     "14": "Left-arm",
+     "15": "Right-arm",
+     "16": "Bag",
+     "17": "Scarf"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "label2id": {
+     "Background": 0,
+     "Bag": 16,
+     "Belt": 8,
+     "Dress": 7,
+     "Face": 11,
+     "Hair": 2,
+     "Hat": 1,
+     "Left-arm": 14,
+     "Left-leg": 12,
+     "Left-shoe": 9,
+     "Pants": 6,
+     "Right-arm": 15,
+     "Right-leg": 13,
+     "Right-shoe": 10,
+     "Scarf": 17,
+     "Skirt": 5,
+     "Sunglasses": 3,
+     "Upper-clothes": 4
+   },
+   "layer_norm_eps": 1e-06,
+   "mlp_ratios": [
+     4,
+     4,
+     4,
+     4
+   ],
+   "model_type": "segformer",
+   "num_attention_heads": [
+     1,
+     2,
+     5,
+     8
+   ],
+   "num_channels": 3,
+   "num_encoder_blocks": 4,
+   "patch_sizes": [
+     7,
+     3,
+     3,
+     3
+   ],
+   "reshape_last_stage": true,
+   "semantic_loss_ignore_index": 255,
+   "sr_ratios": [
+     8,
+     4,
+     2,
+     1
+   ],
+   "strides": [
+     4,
+     2,
+     2,
+     2
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.24.0"
+ }
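
Note: since `id2label` and `label2id` ship in this config, downstream code can read the class mapping instead of hard-coding it. A minimal sketch, not part of the upload:

```python
from transformers import SegformerConfig

# Load the shipped config and look up classes by index or by name.
config = SegformerConfig.from_pretrained("mattmdjaga/segformer_b2_clothes")
print(config.id2label[4])        # "Upper-clothes"
print(config.label2id["Scarf"])  # 17
```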
segformer_b2_clothes/handler.py ADDED
@@ -0,0 +1,39 @@
+ from typing import Dict, List, Any
+ from PIL import Image
+ from io import BytesIO
+ from transformers import AutoModelForSemanticSegmentation, AutoFeatureExtractor
+ import base64
+ import torch
+ from torch import nn
+ 
+ class EndpointHandler:
+     def __init__(self, path="."):
+         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         self.model = AutoModelForSemanticSegmentation.from_pretrained(path).to(self.device).eval()
+         self.feature_extractor = AutoFeatureExtractor.from_pretrained(path)
+ 
+     def __call__(self, data: Dict[str, Any]) -> List[List[int]]:
+         """
+         data args:
+             inputs (:obj:`dict`): {"image": base64-encoded image string}
+         Return:
+             A nested :obj:`list` of per-pixel class indices (height x width).
+         """
+         inputs = data.pop("inputs", data)
+ 
+         # decode base64 image to PIL
+         image = Image.open(BytesIO(base64.b64decode(inputs["image"])))
+ 
+         # preprocess image
+         encoding = self.feature_extractor(images=image, return_tensors="pt")
+         pixel_values = encoding["pixel_values"].to(self.device)
+         with torch.no_grad():
+             outputs = self.model(pixel_values=pixel_values)
+             logits = outputs.logits
+         # upsample logits back to the original image resolution
+         upsampled_logits = nn.functional.interpolate(
+             logits,
+             size=image.size[::-1],
+             mode="bilinear",
+             align_corners=False,
+         )
+         pred_seg = upsampled_logits.argmax(dim=1)[0]
+         return pred_seg.tolist()
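
Note: for a quick local test, the handler can be fed the same payload shape an inference endpoint would send it. A hedged sketch, not part of the upload — it assumes `handler.py` is importable and `person.jpg` is any local test image (both names are illustrative):

```python
import base64
from handler import EndpointHandler  # the class defined above

# Encode a local test image the way an endpoint client would.
with open("person.jpg", "rb") as f:  # illustrative filename
    payload = {"inputs": {"image": base64.b64encode(f.read()).decode("utf-8")}}

handler = EndpointHandler(path="segformer_b2_clothes")  # folder with the model files
pred = handler(payload)              # nested list of class indices, height x width
print(len(pred), len(pred[0]))
```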
segformer_b2_clothes/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f86fd90c567afd4370b3cc3a7e81ed767a632b2832a738331af660acc0c4c68
+ size 109493236
segformer_b2_clothes/onnx/config.json ADDED
@@ -0,0 +1,109 @@
+ {
+   "_name_or_path": "mattmdjaga/segformer_b2_clothes",
+   "architectures": [
+     "SegformerForSemanticSegmentation"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "classifier_dropout_prob": 0.1,
+   "decoder_hidden_size": 768,
+   "depths": [
+     3,
+     4,
+     6,
+     3
+   ],
+   "downsampling_rates": [
+     1,
+     4,
+     8,
+     16
+   ],
+   "drop_path_rate": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_sizes": [
+     64,
+     128,
+     320,
+     512
+   ],
+   "id2label": {
+     "0": "Background",
+     "1": "Hat",
+     "2": "Hair",
+     "3": "Sunglasses",
+     "4": "Upper-clothes",
+     "5": "Skirt",
+     "6": "Pants",
+     "7": "Dress",
+     "8": "Belt",
+     "9": "Left-shoe",
+     "10": "Right-shoe",
+     "11": "Face",
+     "12": "Left-leg",
+     "13": "Right-leg",
+     "14": "Left-arm",
+     "15": "Right-arm",
+     "16": "Bag",
+     "17": "Scarf"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "label2id": {
+     "Background": 0,
+     "Bag": 16,
+     "Belt": 8,
+     "Dress": 7,
+     "Face": 11,
+     "Hair": 2,
+     "Hat": 1,
+     "Left-arm": 14,
+     "Left-leg": 12,
+     "Left-shoe": 9,
+     "Pants": 6,
+     "Right-arm": 15,
+     "Right-leg": 13,
+     "Right-shoe": 10,
+     "Scarf": 17,
+     "Skirt": 5,
+     "Sunglasses": 3,
+     "Upper-clothes": 4
+   },
+   "layer_norm_eps": 1e-06,
+   "mlp_ratios": [
+     4,
+     4,
+     4,
+     4
+   ],
+   "model_type": "segformer",
+   "num_attention_heads": [
+     1,
+     2,
+     5,
+     8
+   ],
+   "num_channels": 3,
+   "num_encoder_blocks": 4,
+   "patch_sizes": [
+     7,
+     3,
+     3,
+     3
+   ],
+   "reshape_last_stage": true,
+   "semantic_loss_ignore_index": 255,
+   "sr_ratios": [
+     8,
+     4,
+     2,
+     1
+   ],
+   "strides": [
+     4,
+     2,
+     2,
+     2
+   ],
+   "transformers_version": "4.34.0"
+ }
segformer_b2_clothes/onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a93a8dac171b5c1fcc53632a8bfc180bfd9759ea69a3e207451bb07f76add54f
+ size 110039290
segformer_b2_clothes/onnx/preprocessor_config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "do_normalize": true,
+   "do_reduce_labels": false,
+   "do_rescale": true,
+   "do_resize": true,
+   "feature_extractor_type": "SegformerFeatureExtractor",
+   "image_mean": [
+     0.485,
+     0.456,
+     0.406
+   ],
+   "image_processor_type": "SegformerFeatureExtractor",
+   "image_std": [
+     0.229,
+     0.224,
+     0.225
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 512,
+     "width": 512
+   }
+ }
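
Note: this config pins the preprocessing the ONNX export expects — resize to 512x512 with bilinear resampling (`resample: 2` is PIL's BILINEAR), rescale by 1/255, normalize with the ImageNet mean/std. A minimal sketch of matching inference with ONNX Runtime, not part of the upload; it assumes `onnxruntime` is installed, the image path is illustrative, and the output resolution is 1/4 of the input as with SegFormer generally:

```python
import numpy as np
import onnxruntime as ort
from PIL import Image

# Reproduce the preprocessor config by hand: resize, rescale, normalize.
image = Image.open("person.jpg").convert("RGB").resize((512, 512), Image.BILINEAR)
x = np.asarray(image, dtype=np.float32) / 255.0          # rescale_factor
x = (x - [0.485, 0.456, 0.406]) / [0.229, 0.224, 0.225]  # image_mean / image_std
x = x.transpose(2, 0, 1)[None].astype(np.float32)        # HWC -> NCHW

session = ort.InferenceSession("segformer_b2_clothes/onnx/model.onnx")
logits = session.run(None, {session.get_inputs()[0].name: x})[0]
pred_seg = logits.argmax(axis=1)[0]  # class indices at 1/4 resolution (128x128)
```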
segformer_b2_clothes/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f642f5c29cb7c9ac0ff242ccf94220c88913f4a65db4727b2530a987ce14d9a
+ size 219104837
segformer_b2_clothes/preprocessor_config.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "do_normalize": true,
+   "do_resize": true,
+   "feature_extractor_type": "SegformerFeatureExtractor",
+   "image_mean": [
+     0.485,
+     0.456,
+     0.406
+   ],
+   "image_std": [
+     0.229,
+     0.224,
+     0.225
+   ],
+   "reduce_labels": false,
+   "resample": 2,
+   "size": 512
+ }
segformer_b2_clothes/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:934543143c97acf3197b030bb0ba046f6c713757467a7dcf47f27ce8c0d6264d
+ size 109579005
segformer_b2_clothes/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7c38376dfee2c075efd2b37186139541f47970794c545ba17f510796313aaa8
+ size 14575
segformer_b2_clothes/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a9a297dec0fe2336eab64ac3bbd47e4936655c43239740a40cfe5f4623a0657
+ size 627
segformer_b2_clothes/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
segformer_b2_clothes/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:210f58c34439201a03f7a2e923b10e2a9b03a8943740f452ae4e8f57ebcfc186
+ size 3323