chanelcolgate committed on
Commit
17be7f1
1 Parent(s): 71de4de

new file: tumorsbrain.py

Browse files
Files changed (2) hide show
  1. README.md +32 -3
  2. tumorsbrain.py +214 -0
README.md CHANGED
@@ -1,7 +1,36 @@
1
  ---
2
- # For reference on model card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1
3
- # Doc / guide: https://huggingface.co/docs/hub/datasets-cards
4
- {}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  ---
6
 
7
  # Dataset Card for Dataset Name
 
1
  ---
2
+ dataset_info:
3
+ features:
4
+ - name: image
5
+ dtype: image
6
+ - name: image_id
7
+ dtype: int64
8
+ - name: objects
9
+ sequence:
10
+ - name: id
11
+ dtype: int64
12
+ - name: area
13
+ dtype: float64
14
+ - name: bbox
15
+ sequence: float32
16
+ length: 4
17
+ - name: label
18
+ dtype:
19
+ class_label:
20
+ names:
21
+ '0': negative
22
+ '1': positive
23
+ - name: iscrowd
24
+ dtype: bool
25
+ splits:
26
+ - name: train
27
+ num_bytes: 11482275
28
+ num_examples: 893
29
+ - name: test
30
+ num_bytes: 2794404
31
+ num_examples: 223
32
+ download_size: 12628405
33
+ dataset_size: 14276679
34
  ---
35
 
36
  # Dataset Card for Dataset Name
tumorsbrain.py ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ from pathlib import Path
4
+ from typing import Dict, Any, List, Union, Iterator, Tuple
5
+
6
+ import datasets
7
+ from datasets.download.download_manager import DownloadManager, ArchiveIterable
8
+
9
# Typing alias for a COCO-style bounding box: (x, y, width, height).
_TYPING_BOX = Tuple[float, float, float, float]

_DESCRIPTION = """\
Training image sets and labels/bounding box coordinates for detecting brain
tumors in MR images.
- The datasets JPGs exported at their native size and are separated by plan
(Axial, Coronal and Sagittal).
- Tumors were hand labeled using https://makesense.ai
- Bounding box coordinates and MGMT positive labels were marked on ~400 images
for each plane in the T1wCE series from the RSNA-MICCAI competition data.
"""

# Remote archives hosted on the Hugging Face Hub.
_URLS = {
    "train": "https://huggingface.co/datasets/chanelcolgate/tumorsbrain/resolve/main/data/train.zip",
    "test": "https://huggingface.co/datasets/chanelcolgate/tumorsbrain/resolve/main/data/test.zip",
    "annotations": "https://huggingface.co/datasets/chanelcolgate/tumorsbrain/resolve/main/data/annotations.zip",
}

# Relative locations inside the downloaded/extracted archives.
_PATHS = {
    "annotations": {
        "train": Path("_annotations.coco.train.json"),
        "test": Path("_annotations.coco.test.json"),
    },
    "images": {"train": Path("train"), "test": Path("test")},
}

# Class-label names; list order defines the integer label ids (0, 1).
_CLASSES = ["negative", "positive"]

# Splits produced by this loader.
_SPLITS = ["train", "test"]
39
+
40
+
41
def round_box_values(box, decimals=2):
    """Return *box* with each coordinate rounded to *decimals* places."""
    return [round(coord, decimals) for coord in box]
43
+
44
+
45
class COCOHelper:
    """Helper class to load COCO annotations and index them by image path.

    Args:
        annotation_path: Path to a COCO-format JSON annotation file with
            ``images``, ``annotations`` and ``categories`` sections.
        images_dir: Directory each image's ``file_name`` is joined against
            to build the string keys used for lookups.
    """

    def __init__(self, annotation_path: Path, images_dir: Path) -> None:
        with open(annotation_path, "r") as file:
            self.data = json.load(file)

        # Group annotations by the id of the image they belong to.
        dict_id2annot: Dict[int, Any] = {}
        for annot in self.annotations:
            dict_id2annot.setdefault(annot["image_id"], []).append(annot)

        # Sort each image's annotations by annotation id for a stable order.
        # (sorted() already returns a list; no extra list() needed.)
        dict_id2annot = {
            k: sorted(v, key=lambda a: a["id"])
            for k, v in dict_id2annot.items()
        }

        # Map "<images_dir>/<file_name>" -> annotations / image id.
        self.dict_path2annot: Dict[str, Any] = {}
        self.dict_path2id: Dict[str, Any] = {}
        for img in self.images:
            path_img_str = str(images_dir / str(img["file_name"]))
            idx = int(img["id"])
            self.dict_path2annot[path_img_str] = dict_id2annot.get(idx, [])
            self.dict_path2id[path_img_str] = img["id"]

    def __len__(self) -> int:
        """Number of images in the annotation file."""
        # Use the `images` property instead of reaching into self.data.
        return len(self.images)

    @property
    def images(self) -> List[Dict[str, Union[str, int]]]:
        """Raw COCO ``images`` records."""
        return self.data["images"]

    @property
    def annotations(self) -> List[Any]:
        """Raw COCO ``annotations`` records."""
        return self.data["annotations"]

    @property
    def categories(self) -> List[Dict[str, Union[str, int]]]:
        """Raw COCO ``categories`` records."""
        return self.data["categories"]

    def get_annotations(self, image_path: str) -> List[Any]:
        """Return the annotations for *image_path* (empty list if unknown)."""
        return self.dict_path2annot.get(image_path, [])

    def get_image_id(self, image_path: str) -> int:
        """Return the COCO image id for *image_path* (-1 if unknown)."""
        return self.dict_path2id.get(image_path, -1)
93
+
94
+
95
class COCOThienviet(datasets.GeneratorBasedBuilder):
    """COCO Thienviet dataset: brain-tumor detection in MR images."""

    VERSION = datasets.Version("1.0.1")

    def _info(self) -> datasets.DatasetInfo:
        """
        Return the dataset metadata and features.

        Returns:
            DatasetInfo: Metadata and features of the dataset.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "image_id": datasets.Value("int64"),
                    "objects": datasets.Sequence(
                        {
                            "id": datasets.Value("int64"),
                            "area": datasets.Value("float64"),
                            "bbox": datasets.Sequence(
                                datasets.Value("float32"), length=4
                            ),
                            "label": datasets.ClassLabel(names=_CLASSES),
                            "iscrowd": datasets.Value("bool"),
                        }
                    ),
                }
            ),
        )

    def _split_generators(
        self, dl_manager: DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """
        Provides the split information and downloads the data.

        Args:
            dl_manager (DownloadManager): The DownloadManager to use for
                downloading and extracting data.

        Returns:
            List[SplitGenerator]: List of SplitGenerator objects representing
                the data splits.
        """
        archive_annots = dl_manager.download_and_extract(_URLS["annotations"])

        # Map each split key to its datasets.Split constant once, instead of
        # duplicating the whole SplitGenerator construction per branch.
        split_names = {
            "train": datasets.Split.TRAIN,
            "test": datasets.Split.TEST,
        }

        splits = []
        for split in _SPLITS:
            # Image archives are downloaded but NOT extracted: they are
            # streamed lazily via iter_archive in _generate_examples.
            archive_split = dl_manager.download(_URLS[split])
            annotation_path = (
                Path(archive_annots) / _PATHS["annotations"][split]
            )
            splits.append(
                datasets.SplitGenerator(
                    name=split_names[split],
                    gen_kwargs={
                        "annotation_path": annotation_path,
                        "images_dir": _PATHS["images"][split],
                        "images": dl_manager.iter_archive(archive_split),
                    },
                )
            )
        return splits

    def _generate_examples(
        self, annotation_path: Path, images_dir: Path, images: ArchiveIterable
    ) -> Iterator:
        """
        Generates examples for the dataset.

        Args:
            annotation_path (Path): The path to the annotation file.
            images_dir (Path): The path to the directory containing the images.
            images (ArchiveIterable): An iterable containing the images.

        Yields:
            Tuple[str, Dict]: The archive-relative image path and a dict with
                the image bytes, its id, and its object annotations.
        """
        coco_annotation = COCOHelper(annotation_path, images_dir)

        for image_path, f in images:
            # COCOHelper keys use the local path separator while archive
            # members use "/"; normalize once and reuse for both lookups.
            norm_path = os.path.normpath(image_path)
            annotations = coco_annotation.get_annotations(norm_path)
            ret = {
                "image": {"path": image_path, "bytes": f.read()},
                "image_id": coco_annotation.get_image_id(norm_path),
                "objects": [
                    {
                        "id": annot["id"],
                        "area": annot["area"],
                        # bbox is COCO-style [x, y, w, h]
                        "bbox": round_box_values(annot["bbox"], 2),
                        "label": annot["category_id"],
                        "iscrowd": bool(annot["iscrowd"]),
                    }
                    for annot in annotations
                ],
            }
            yield image_path, ret