Yahaira committed on
Commit
44b9406
1 Parent(s): 18c41ee

Upload 4 files

Browse files
Files changed (4) hide show
  1. anemia.py +80 -0
  2. data/test.zip +3 -0
  3. data/train.zip +3 -0
  4. data/validation.zip +3 -0
anemia.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
import os

import datasets
from datasets.tasks import ImageClassification
4
+
5
+
6
+ _CITATION = """\
7
+ @data{t5s2-4j73-22,
8
+ doi = {10.21227/t5s2-4j73},
9
+ url = {https://dx.doi.org/10.21227/t5s2-4j73},
10
+ author = {Dimauro, Giovanni and Maglietta, Rosalia and Bai, Thulasi and Kasiviswanathan, Sivachandar},
11
+ publisher = {IEEE Dataport},
12
+ title = {Eyes-defy-anemia},
13
+ year = {2022} }
14
+ """
15
+
16
+ _DESCRIPTION = """\
17
+ The dataset Eyes-defy-anemia contains 218 images of eyes, in particular
18
+ conjunctivas, which can be used for research on the diagnosis/estimation
19
+ of anemia based on the pallor of conjunctiva.
20
+ """
21
+
22
+ _URLS = {
23
+ "train": "https://huggingface.co/datasets/Yahaira/anemia-eyes/blob/main/data/train.zip",
24
+ "validation": "https://huggingface.co/datasets/Yahaira/anemia-eyes/blob/main/data/validation.zip",
25
+ "test": "https://huggingface.co/datasets/Yahaira/anemia-eyes/blob/main/data/test.zip",
26
+ }
27
+
28
+ _NAMES = ["Anemia, NoAnemia"]
29
+
30
+
31
class Anemia(datasets.GeneratorBasedBuilder):
    """Eyes-defy-anemia dataset builder: conjunctiva eye images for anemia detection.

    Fixed: the original docstring ("Beans plant leaf images dataset.") was a
    copy-paste leftover from the HF `beans` dataset template.
    """

    def _info(self):
        """Return the dataset metadata (features, supervised keys, citation)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image_file_path": datasets.Value("string"),
                    "image": datasets.Image(),
                    "labels": datasets.features.ClassLabel(names=_NAMES),
                }
            ),
            supervised_keys=("image", "labels"),
            citation=_CITATION,
            task_templates=[ImageClassification(image_column="image", label_column="labels")],
        )

    def _split_generators(self, dl_manager):
        """Download/extract the three zip archives and create one generator per split."""
        data_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"files": dl_manager.iter_files([data_files[key]])},
            )
            for split_name, key in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "validation"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, files):
        """Yield (index, example) pairs for every .jpg file in the split.

        The label is derived from the image's parent directory name.

        NOTE(review): the label is lower-cased ("anemia"/"noanemia") while
        _NAMES declares "Anemia"/"NoAnemia" — ClassLabel string encoding is
        case-sensitive, so confirm the archives' directory casing actually
        round-trips through encode_example; adjust one side if it doesn't.
        """
        for i, path in enumerate(files):
            file_name = os.path.basename(path)
            # Skip anything that is not a .jpg image (e.g. metadata files).
            if file_name.endswith(".jpg"):
                yield i, {
                    "image_file_path": path,
                    "image": path,
                    "labels": os.path.basename(os.path.dirname(path)).lower(),
                }
data/test.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c526dc85c97e2818abb321a5649857c1f994c2d072604f48a2ad21347915e64d
3
+ size 53673267
data/train.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cabf0fc7248e5ab12b4038091232458d8649939eb88aea2de92b8d7b10e94ed4
3
+ size 421003693
data/validation.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:11c99025c3ba5f2523963f826953cf0862b584cb057279a4eddbd2d3fe88666e
3
+ size 51176131