PHilita committed on
Commit 87141c1
1 Parent(s): 937cc8f

Carla-COCO-Object-Detection-Dataset.py CHANGED
@@ -1,158 +1,19 @@
-# # coding=utf-8
-# # Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
-# #
-# # Licensed under the Apache License, Version 2.0 (the "License");
-# # you may not use this file except in compliance with the License.
-# # You may obtain a copy of the License at
-# #
-# #     http://www.apache.org/licenses/LICENSE-2.0
-# #
-# # Unless required by applicable law or agreed to in writing, software
-# # distributed under the License is distributed on an "AS IS" BASIS,
-# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# # See the License for the specific language governing permissions and
-# # limitations under the License.
-# """CPPE-5 dataset."""
-
-
-# import collections
-# import json
-# import os
-
-# import datasets
-
-
-# _CITATION = """\
-# @misc{dagli2021cppe5,
-#     title={CPPE-5: Medical Personal Protective Equipment Dataset},
-#     author={Rishit Dagli and Ali Mustufa Shaikh},
-#     year={2021},
-#     eprint={2112.09569},
-#     archivePrefix={arXiv},
-#     primaryClass={cs.CV}
-# }
-# """
-
-# _DESCRIPTION = """\
-# CPPE - 5 (Medical Personal Protective Equipment) is a new challenging dataset with the goal
-# to allow the study of subordinate categorization of medical personal protective equipments,
-# which is not possible with other popular data sets that focus on broad level categories.
-# """
-
-# _HOMEPAGE = "https://sites.google.com/view/cppe5"
-
-# _LICENSE = "Unknown"
-
-# _URL = "https://drive.google.com/uc?id=1QeveFt1jDNrafJeeCV1N_KoIKQEZyhuf"
-
-# _CATEGORIES = ["Coverall", "Face_Shield", "Gloves", "Goggles", "Mask"]
-
-
-# class CPPE5(datasets.GeneratorBasedBuilder):
-#     """CPPE - 5 dataset."""
-
-#     VERSION = datasets.Version("1.0.0")
-
-#     def _info(self):
-#         features = datasets.Features(
-#             {
-#                 "image_id": datasets.Value("int64"),
-#                 "image": datasets.Image(),
-#                 "width": datasets.Value("int32"),
-#                 "height": datasets.Value("int32"),
-#                 "objects": datasets.Sequence(
-#                     {
-#                         "id": datasets.Value("int64"),
-#                         "area": datasets.Value("int64"),
-#                         "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
-#                         "category": datasets.ClassLabel(names=_CATEGORIES),
-#                     }
-#                 ),
-#             }
-#         )
-#         return datasets.DatasetInfo(
-#             description=_DESCRIPTION,
-#             features=features,
-#             homepage=_HOMEPAGE,
-#             license=_LICENSE,
-#             citation=_CITATION,
-#         )
-
-#     def _split_generators(self, dl_manager):
-#         archive = dl_manager.download(_URL)
-#         return [
-#             datasets.SplitGenerator(
-#                 name=datasets.Split.TRAIN,
-#                 gen_kwargs={
-#                     "annotation_file_path": "annotations/train.json",
-#                     "files": dl_manager.iter_archive(archive),
-#                 },
-#             ),
-#             datasets.SplitGenerator(
-#                 name=datasets.Split.TEST,
-#                 gen_kwargs={
-#                     "annotation_file_path": "annotations/test.json",
-#                     "files": dl_manager.iter_archive(archive),
-#                 },
-#             ),
-#         ]
-
-#     def _generate_examples(self, annotation_file_path, files):
-#         def process_annot(annot, category_id_to_category):
-#             return {
-#                 "id": annot["id"],
-#                 "area": annot["area"],
-#                 "bbox": annot["bbox"],
-#                 "category": category_id_to_category[annot["category_id"]],
-#             }
-
-#         image_id_to_image = {}
-#         idx = 0
-#         # This loop relies on the ordering of the files in the archive:
-#         # Annotation files come first, then the images.
-#         for path, f in files:
-#             file_name = os.path.basename(path)
-#             if path == annotation_file_path:
-#                 annotations = json.load(f)
-#                 category_id_to_category = {category["id"]: category["name"] for category in annotations["categories"]}
-#                 image_id_to_annotations = collections.defaultdict(list)
-#                 for annot in annotations["annotations"]:
-#                     image_id_to_annotations[annot["image_id"]].append(annot)
-#                 image_id_to_image = {annot["file_name"]: annot for annot in annotations["images"]}
-#             elif file_name in image_id_to_image:
-#                 image = image_id_to_image[file_name]
-#                 objects = [
-#                     process_annot(annot, category_id_to_category) for annot in image_id_to_annotations[image["id"]]
-#                 ]
-#                 yield idx, {
-#                     "image_id": image["id"],
-#                     "image": {"path": path, "bytes": f.read()},
-#                     "width": image["width"],
-#                     "height": image["height"],
-#                     "objects": objects,
-#                 }
-#                 idx += 1
-
 # coding=utf-8
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-"""Carla-COCO-Object-Detection-Dataset"""
 
 import collections
 import json
@@ -161,50 +22,51 @@ import os
 import datasets
 
 
-logger = datasets.logging.get_logger(__name__)
 
 _DESCRIPTION = """\
-This dataset contains 1028 images each 640x380 pixels.
-The dataset is split into 249 test and 779 training examples.
-Every image comes with MS COCO format annotations.
-The dataset was collected in Carla Simulator, driving around in autopilot mode in various environments
-(Town01, Town02, Town03, Town04, Town05) and saving every i-th frame.
-The labels where then automatically generated using the semantic segmentation information.
 """
 
-_HOMEPAGE = "https://github.com/yunusskeete/Carla-COCO-Object-Detection-Dataset"
 
-_LICENSE = "MIT"
 
-# _URL = "https://drive.google.com/uc?id=1QeveFt1jDNrafJeeCV1N_KoIKQEZyhuf"
-# # _URL = "https://drive.google.com/uc?id=1xUPwrMBBrGFIapLx_fyLjmH4HN16A4iZ"
-_URL = "https://huggingface.co/datasets/yunusskeete/Carla-COCO-Object-Detection-Dataset/resolve/main/Carla-COCO-Object-Detection-Dataset.tar.gz"
 
 _CATEGORIES = ["automobile", "bike", "motorbike", "traffic_light", "traffic_sign"]
 
-class CARLA_COCO(datasets.GeneratorBasedBuilder):
-    """Carla-COCO-Object-Detection-Dataset"""
 
-    VERSION = datasets.Version("1.1.0")
 
-    def _info(self):
-        """This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset"""
 
         features = datasets.Features(
             {
-                "id": datasets.Value("int64"),
-                "image_id": datasets.Value("string"),
                 "image": datasets.Image(),
                 "width": datasets.Value("int32"),
                 "height": datasets.Value("int32"),
-                "file_name": datasets.Value("string"),
-                "url": datasets.Value("string"),
                 "objects": datasets.Sequence(
                     {
-                        "id": datasets.Sequence(datasets.Value("int64")),
-                        "area": datasets.Sequence(datasets.Value("int64")),
                         "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
-                        "category": datasets.Sequence(datasets.ClassLabel(names=_CATEGORIES)),
                     }
                 ),
             }
@@ -214,41 +76,29 @@ class CARLA_COCO(datasets.GeneratorBasedBuilder):
             features=features,
             homepage=_HOMEPAGE,
             license=_LICENSE,
         )
 
     def _split_generators(self, dl_manager):
-        """This method is tasked with downloading/extracting the data and defining the splits depending on the configuration"""
-
-        archive = dl_manager.download_and_extract(_URL)
-
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
                 gen_kwargs={
                     "annotation_file_path": "annotations/train.json",
                     "files": dl_manager.iter_archive(archive),
-                }
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
                 gen_kwargs={
                     "annotation_file_path": "annotations/test.json",
                     "files": dl_manager.iter_archive(archive),
-                }
             ),
         ]
 
-    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, annotation_file_path, files):
-        """
-        This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        """
-
-        logger.info("generating examples from = %s", annotation_file_path)
-
         def process_annot(annot, category_id_to_category):
             return {
                 "id": annot["id"],
@@ -283,3 +133,154 @@ class CARLA_COCO(datasets.GeneratorBasedBuilder):
                     "objects": objects,
                 }
                 idx += 1

 # coding=utf-8
+# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""CPPE-5 dataset."""
+
 
 import collections
 import json
 import datasets
 
 
+_CITATION = """\
+@misc{dagli2021cppe5,
+    title={CPPE-5: Medical Personal Protective Equipment Dataset},
+    author={Rishit Dagli and Ali Mustufa Shaikh},
+    year={2021},
+    eprint={2112.09569},
+    archivePrefix={arXiv},
+    primaryClass={cs.CV}
+}
+"""
 
 _DESCRIPTION = """\
+CPPE - 5 (Medical Personal Protective Equipment) is a new challenging dataset with the goal
+to allow the study of subordinate categorization of medical personal protective equipments,
+which is not possible with other popular data sets that focus on broad level categories.
 """
 
+_HOMEPAGE = "https://sites.google.com/view/cppe5"
 
+_LICENSE = "Unknown"
 
+# _URL = "https://drive.google.com/uc?id=1MGnaAfbckUmigGUvihz7uiHGC6rBIbvr"
+_URL = "https://huggingface.co/datasets/yunusskeete/cppe5/resolve/main/cppe5.tar.gz"
 
 _CATEGORIES = ["automobile", "bike", "motorbike", "traffic_light", "traffic_sign"]
 
 
+class CPPE5(datasets.GeneratorBasedBuilder):
+    """CPPE - 5 dataset."""
 
+    VERSION = datasets.Version("1.0.0")
 
+    def _info(self):
         features = datasets.Features(
             {
+                "image_id": datasets.Value("int64"),
                 "image": datasets.Image(),
                 "width": datasets.Value("int32"),
                 "height": datasets.Value("int32"),
                 "objects": datasets.Sequence(
                     {
+                        "id": datasets.Value("int64"),
+                        "area": datasets.Value("int64"),
                         "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
+                        "category": datasets.ClassLabel(names=_CATEGORIES),
                     }
                 ),
             }
             features=features,
             homepage=_HOMEPAGE,
             license=_LICENSE,
+            citation=_CITATION,
         )
 
     def _split_generators(self, dl_manager):
+        archive = dl_manager.download(_URL)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
                     "annotation_file_path": "annotations/train.json",
                     "files": dl_manager.iter_archive(archive),
+                },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
                     "annotation_file_path": "annotations/test.json",
                     "files": dl_manager.iter_archive(archive),
+                },
             ),
         ]
 
     def _generate_examples(self, annotation_file_path, files):
         def process_annot(annot, category_id_to_category):
             return {
                 "id": annot["id"],
                     "objects": objects,
                 }
                 idx += 1
+
+# # coding=utf-8
+# # Permission is hereby granted, free of charge, to any person obtaining
+# # a copy of this software and associated documentation files (the
+# # "Software"), to deal in the Software without restriction, including
+# # without limitation the rights to use, copy, modify, merge, publish,
+# # distribute, sublicense, and/or sell copies of the Software, and to
+# # permit persons to whom the Software is furnished to do so, subject to
+# # the following conditions:
+
+# # The above copyright notice and this permission notice shall be
+# # included in all copies or substantial portions of the Software.
+
+# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+# """Carla-COCO-Object-Detection-Dataset"""
+
+# import collections
+# import json
+# import os
+
+# import datasets
+
+
+# logger = datasets.logging.get_logger(__name__)
+
+# _DESCRIPTION = """\
+# This dataset contains 1028 images each 640x380 pixels.
+# The dataset is split into 249 test and 779 training examples.
+# Every image comes with MS COCO format annotations.
+# The dataset was collected in Carla Simulator, driving around in autopilot mode in various environments
+# (Town01, Town02, Town03, Town04, Town05) and saving every i-th frame.
+# The labels where then automatically generated using the semantic segmentation information.
+# """
+
+# _HOMEPAGE = "https://github.com/yunusskeete/Carla-COCO-Object-Detection-Dataset"
+
+# _LICENSE = "MIT"
+
+# # _URL = "https://drive.google.com/uc?id=1QeveFt1jDNrafJeeCV1N_KoIKQEZyhuf"
+# # # _URL = "https://drive.google.com/uc?id=1xUPwrMBBrGFIapLx_fyLjmH4HN16A4iZ"
+# _URL = "https://huggingface.co/datasets/yunusskeete/Carla-COCO-Object-Detection-Dataset/resolve/main/Carla-COCO-Object-Detection-Dataset.tar.gz"
+
+# _CATEGORIES = ["automobile", "bike", "motorbike", "traffic_light", "traffic_sign"]
+
+# class CARLA_COCO(datasets.GeneratorBasedBuilder):
+#     """Carla-COCO-Object-Detection-Dataset"""
+
+#     VERSION = datasets.Version("1.1.0")
+
+#     def _info(self):
+#         """This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset"""
+
+#         features = datasets.Features(
+#             {
+#                 "id": datasets.Value("int64"),
+#                 "image_id": datasets.Value("string"),
+#                 "image": datasets.Image(),
+#                 "width": datasets.Value("int32"),
+#                 "height": datasets.Value("int32"),
+#                 "file_name": datasets.Value("string"),
+#                 "url": datasets.Value("string"),
+#                 "objects": datasets.Sequence(
+#                     {
+#                         "id": datasets.Sequence(datasets.Value("int64")),
+#                         "area": datasets.Sequence(datasets.Value("int64")),
+#                         "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
+#                         "category": datasets.Sequence(datasets.ClassLabel(names=_CATEGORIES)),
+#                     }
+#                 ),
+#             }
+#         )
+#         return datasets.DatasetInfo(
+#             description=_DESCRIPTION,
+#             features=features,
+#             homepage=_HOMEPAGE,
+#             license=_LICENSE,
+#         )
+
+#     def _split_generators(self, dl_manager):
+#         """This method is tasked with downloading/extracting the data and defining the splits depending on the configuration"""
+
+#         archive = dl_manager.download_and_extract(_URL)
+
+#         return [
+#             datasets.SplitGenerator(
+#                 name=datasets.Split.TRAIN,
+#                 # These kwargs will be passed to _generate_examples
+#                 gen_kwargs={
+#                     "annotation_file_path": "annotations/train.json",
+#                     "files": dl_manager.iter_archive(archive),
+#                 }
+#             ),
+#             datasets.SplitGenerator(
+#                 name=datasets.Split.TEST,
+#                 # These kwargs will be passed to _generate_examples
+#                 gen_kwargs={
+#                     "annotation_file_path": "annotations/test.json",
+#                     "files": dl_manager.iter_archive(archive),
+#                 }
+#             ),
+#         ]
+
+#     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+#     def _generate_examples(self, annotation_file_path, files):
+#         """
+#         This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+#         The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+#         """
+
+#         logger.info("generating examples from = %s", annotation_file_path)
+
+#         def process_annot(annot, category_id_to_category):
+#             return {
+#                 "id": annot["id"],
+#                 "area": annot["area"],
+#                 "bbox": annot["bbox"],
+#                 "category": category_id_to_category[annot["category_id"]],
+#             }
+
+#         image_id_to_image = {}
+#         idx = 0
+#         # # This loop relies on the ordering of the files in the archive:
+#         # # Annotation files come first, then the images.
+#         for path, f in files:
+#             file_name = os.path.basename(path)
+#             if path == annotation_file_path:
+#                 annotations = json.load(f)
+#                 category_id_to_category = {category["id"]: category["name"] for category in annotations["categories"]}
+#                 image_id_to_annotations = collections.defaultdict(list)
+#                 for annot in annotations["annotations"]:
+#                     image_id_to_annotations[annot["image_id"]].append(annot)
+#                 image_id_to_image = {annot["file_name"]: annot for annot in annotations["images"]}
+#             elif file_name in image_id_to_image:
+#                 image = image_id_to_image[file_name]
+#                 objects = [
+#                     process_annot(annot, category_id_to_category) for annot in image_id_to_annotations[image["id"]]
+#                 ]
+#                 yield idx, {
+#                     "image_id": image["id"],
+#                     "image": {"path": path, "bytes": f.read()},
+#                     "width": image["width"],
+#                     "height": image["height"],
+#                     "objects": objects,
+#                 }
+#                 idx += 1
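
After this commit the active script is the CPPE5 builder, pointed at the Hugging Face-hosted tarball but keeping the Carla category names, while the previous CARLA_COCO script survives only as the commented-out block above. A minimal usage sketch, not part of the commit; the repo id and the trust_remote_code flag for script-based loading are assumptions about the consuming environment:

# Sketch: load the dataset through this script and inspect one example.
from datasets import load_dataset

ds = load_dataset(
    "yunusskeete/Carla-COCO-Object-Detection-Dataset",
    trust_remote_code=True,  # newer `datasets` releases require this for script repos
)
print(ds)  # DatasetDict with "train" and "test" splits

example = ds["train"][0]
print(example["image_id"], example["width"], example["height"])
# "objects" is a datasets.Sequence of dicts, so it materialises as a dict of lists:
print(example["objects"]["bbox"][:2])   # first two COCO-style [x, y, w, h] boxes
print(example["objects"]["category"])   # integer indices into _CATEGORIES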
Carla-COCO-Object-Detection-Dataset.tar.gz CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f0d57eeaab1b898a57bed387aa9fbf462f54e5348d0bc8b6a8b09cffaa9ae9fe
-size 396708471
+oid sha256:4e4c74a169b5bbeeeaeed30264986cddafb817635cf61ba94c1d893da4df3171
+size 396704813
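
The archive itself lives in Git LFS, so the diff above only touches the pointer file: oid is the SHA-256 of the archive contents and size its byte count. A small stdlib sketch for checking a locally downloaded copy against the new pointer (the local filename is an assumption):

# Sketch: verify a downloaded archive against the LFS pointer values above.
import hashlib
import os

EXPECTED_OID = "4e4c74a169b5bbeeeaeed30264986cddafb817635cf61ba94c1d893da4df3171"
EXPECTED_SIZE = 396704813
PATH = "Carla-COCO-Object-Detection-Dataset.tar.gz"  # assumed local path

assert os.path.getsize(PATH) == EXPECTED_SIZE, "size mismatch with LFS pointer"

h = hashlib.sha256()
with open(PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch with LFS pointer"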
dataset_infos.json CHANGED
@@ -1 +1 @@
-{"default": {"description": "CPPE - 5 (Medical Personal Protective Equipment) is a new challenging dataset with the goal\nto allow the study of subordinate categorization of medical personal protective equipments,\nwhich is not possible with other popular data sets that focus on broad level categories.\n", "citation": "@misc{dagli2021cppe5,\n title={CPPE-5: Medical Personal Protective Equipment Dataset},\n author={Rishit Dagli and Ali Mustufa Shaikh},\n year={2021},\n eprint={2112.09569},\n archivePrefix={arXiv},\n primaryClass={cs.CV}\n}\n", "homepage": "https://sites.google.com/view/cppe5", "license": "Unknown", "features": {"image_id": {"dtype": "int64", "id": null, "_type": "Value"}, "image": {"id": null, "_type": "Image"}, "width": {"dtype": "int32", "id": null, "_type": "Value"}, "height": {"dtype": "int32", "id": null, "_type": "Value"}, "objects": {"feature": {"id": {"dtype": "int64", "id": null, "_type": "Value"}, "area": {"dtype": "int64", "id": null, "_type": "Value"}, "bbox": {"feature": {"dtype": "float32", "id": null, "_type": "Value"}, "length": 4, "id": null, "_type": "Sequence"}, "category": {"num_classes": 5, "names": ["Coverall", "Face_Shield", "Gloves", "Goggles", "Mask"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "cppe5", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 240481281, "num_examples": 1000, "dataset_name": "cppe5"}, "test": {"name": "test", "num_bytes": 4172739, "num_examples": 29, "dataset_name": "cppe5"}}, "download_checksums": {"https://drive.google.com/uc?id=1MGnaAfbckUmigGUvihz7uiHGC6rBIbvr": {"num_bytes": 238482705, "checksum": "1151086e59fcb87825ecf4d362847a3f023ba69e7ace0f513d5aadc0e3dd3094"}}, "download_size": 238482705, "post_processing_size": null, "dataset_size": 244654020, "size_in_bytes": 483136725}}
+{"default": {"description": "CPPE - 5 (Medical Personal Protective Equipment) is a new challenging dataset with the goal\nto allow the study of subordinate categorization of medical personal protective equipments,\nwhich is not possible with other popular data sets that focus on broad level categories.\n", "citation": "@misc{dagli2021cppe5,\n title={CPPE-5: Medical Personal Protective Equipment Dataset},\n author={Rishit Dagli and Ali Mustufa Shaikh},\n year={2021},\n eprint={2112.09569},\n archivePrefix={arXiv},\n primaryClass={cs.CV}\n}\n", "homepage": "https://sites.google.com/view/cppe5", "license": "Unknown", "features": {"image_id": {"dtype": "int64", "id": null, "_type": "Value"}, "image": {"id": null, "_type": "Image"}, "width": {"dtype": "int32", "id": null, "_type": "Value"}, "height": {"dtype": "int32", "id": null, "_type": "Value"}, "objects": {"feature": {"id": {"dtype": "int64", "id": null, "_type": "Value"}, "area": {"dtype": "int64", "id": null, "_type": "Value"}, "bbox": {"feature": {"dtype": "float32", "id": null, "_type": "Value"}, "length": 4, "id": null, "_type": "Sequence"}, "category": {"num_classes": 5, "names": ["automobile", "bike", "motorbike", "traffic_light", "traffic_sign"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "cppe5", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 240481281, "num_examples": 779, "dataset_name": "cppe5"}, "test": {"name": "test", "num_bytes": 4172739, "num_examples": 249, "dataset_name": "cppe5"}}, "download_checksums": {"https://drive.google.com/uc?id=1MGnaAfbckUmigGUvihz7uiHGC6rBIbvr": {"num_bytes": 238482705, "checksum": "1151086e59fcb87825ecf4d362847a3f023ba69e7ace0f513d5aadc0e3dd3094"}}, "download_size": 238482705, "post_processing_size": null, "dataset_size": 244654020, "size_in_bytes": 483136725}}