Datasets:
Tasks:
Image Segmentation
License:
SatwikKambham
committed on
Commit
•
ff7f0d1
1
Parent(s):
43f849e
Added loading script and updated readme
Browse files
README.md
CHANGED
@@ -1,3 +1,19 @@
|
|
1 |
---
|
2 |
license: mit
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3 |
---
|
|
|
1 |
---
|
2 |
license: mit
|
3 |
+
dataset_info:
|
4 |
+
config_name: suim
|
5 |
+
features:
|
6 |
+
- name: img
|
7 |
+
dtype: image
|
8 |
+
- name: mask
|
9 |
+
dtype: image
|
10 |
+
splits:
|
11 |
+
- name: train
|
12 |
+
num_bytes: 511917
|
13 |
+
num_examples: 1525
|
14 |
+
- name: test
|
15 |
+
num_bytes: 35774
|
16 |
+
num_examples: 110
|
17 |
+
download_size: 183261195
|
18 |
+
dataset_size: 547691
|
19 |
---
|
suim.py
ADDED
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset
|
2 |
+
# script contributor.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
"""Semantic Segmentation of Underwater IMagery (SUIM) dataset"""
|
16 |
+
|
17 |
+
|
18 |
+
import os
|
19 |
+
|
20 |
+
import datasets
|
21 |
+
|
22 |
+
|
23 |
+
# NOTE(review): this block was reconstructed from a diff view, so the exact
# whitespace inside the string literals below is assumed — verify against the
# original suim.py before relying on byte-level contents.

# BibTeX entry for the SUIM paper (Islam et al., IROS 2020); surfaced by the
# datasets library as the dataset's citation.
_CITATION = """\
@inproceedings{islam2020suim,
  title={{Semantic Segmentation of Underwater Imagery: Dataset and Benchmark}},
  author={Islam, Md Jahidul and Edge, Chelsey and Xiao, Yuyang and Luo, Peigen and Mehtaz,
  Muntaqim and Morse, Christopher and Enan, Sadman Sakib and Sattar, Junaed},
  booktitle={IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
  year={2020},
  organization={IEEE/RSJ}
}
"""

# Human-readable dataset description (markdown), including the 8-class
# color-code table used by the segmentation masks.
_DESCRIPTION = """\
The SUIM dataset is a dataset for semantic segmentation of underwater imagery.

The dataset consists of 1525 annotated images for training/validation and
110 samples for testing.

| Object category                  | Symbol | RGB color code |
|----------------------------------|--------|----------------|
| Background (waterbody)           | BW     | 000 (black)    |
| Human divers                     | HD     | 001 (blue)     |
| Aquatic plants and sea-grass     | PF     | 010 (green)    |
| Wrecks and ruins                 | WR     | 011 (sky)      |
| Robots (AUVs/ROVs/instruments)   | RO     | 100 (red)      |
| Reefs and invertebrates          | RI     | 101 (pink)     |
| Fish and vertebrates             | FV     | 110 (yellow)   |
| Sea-floor and rocks              | SR     | 111 (white)    |


For more information about the original SUIM dataset,
please visit the official dataset page:

https://irvlab.cs.umn.edu/resources/suim-dataset

Please refer to the original dataset source for any additional details,
citations, or specific usage guidelines provided by the dataset creators.
"""

# Official project page for the SUIM dataset.
_HOMEPAGE = "https://irvlab.cs.umn.edu/resources/suim-dataset"

# License identifier, matching the `license: mit` entry in the dataset card.
_LICENSE = "mit"
|
64 |
+
|
65 |
+
|
66 |
+
class ExDark(datasets.GeneratorBasedBuilder):
    """Semantic Segmentation of Underwater IMagery (SUIM) dataset.

    NOTE(review): the class name ``ExDark`` looks like a copy-paste leftover
    from an ExDark dataset script — the builder itself is SUIM. The name is
    kept unchanged here so any external references keep working; consider
    renaming it (with an alias) in a follow-up.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="suim",
            version=VERSION,
            description="Semantic Segmentation of Underwater IMagery (SUIM) dataset",
        ),
    ]

    DEFAULT_CONFIG_NAME = "suim"

    def _info(self):
        """Return dataset metadata: an image/mask feature pair plus card info."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # Both stored as image paths; the datasets Image feature
                    # decodes them lazily on access.
                    "img": datasets.Image(),
                    "mask": datasets.Image(),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and map its folders to splits.

        The archive is expected to contain ``SUIM/train_val`` (train) and
        ``SUIM/TEST`` (test), each with ``images/`` and ``masks/`` subfolders.
        """
        data_dir = dl_manager.download_and_extract("SUIM.zip")
        train_dir = os.path.join(data_dir, "SUIM", "train_val")
        test_dir = os.path.join(data_dir, "SUIM", "TEST")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_dir": train_dir,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_dir": test_dir,
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, data_dir, split):
        """Yield ``(index, {"img": path, "mask": path})`` for one split.

        Args:
            data_dir: split directory containing ``images/`` and ``masks/``.
            split: split name ("train"/"test"); unused, kept for the
                gen_kwargs interface established in ``_split_generators``.
        """
        img_dir = os.path.join(data_dir, "images")
        masks_dir = os.path.join(data_dir, "masks")
        # Sort for a deterministic example order/indexing — os.listdir order
        # is filesystem-dependent.
        img_files = sorted(os.listdir(img_dir))

        for idx, img_file in enumerate(img_files):
            img_path = os.path.join(img_dir, img_file)
            # Masks share the image's stem with a .bmp extension. splitext is
            # used instead of str.replace(".jpg", ".bmp"), which would also
            # rewrite a ".jpg" substring mid-filename and would silently leave
            # non-.jpg image names unconverted.
            mask_path = os.path.join(
                masks_dir,
                os.path.splitext(img_file)[0] + ".bmp",
            )
            record = {
                "img": img_path,
                "mask": mask_path,
            }
            yield idx, record
|