Datasets:
Modalities:
3D
Size:
10K<n<100K
File size: 1,964 Bytes
0326d8c 6bd4034 0326d8c 9904afd a4907cc 9904afd c2ac2e7 0326d8c c2ac2e7 0326d8c 6bd4034 c2ac2e7 6bd4034 c2ac2e7 6bd4034 c2ac2e7 6bd4034 0326d8c c2ac2e7 0326d8c c2ac2e7 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 |
import json
import os
import datasets
class WindtunnelDataset(datasets.GeneratorBasedBuilder):
    """Loader for wind-tunnel simulation samples (JSON metadata + mesh files).

    Every feature is stored as raw ``binary`` so consumers decode the JSON /
    OBJ / VTK / PLY payloads themselves.  One example corresponds to one
    simulation-sample directory under the split directory.
    """

    def _info(self):
        """Return the dataset metadata: one binary field per artifact."""
        return datasets.DatasetInfo(
            description="Wind-tunnel simulation data: JSON coefficients/inputs "
            "and mesh files stored as raw bytes.",
            features=datasets.Features(
                {
                    "coeff": datasets.Value("binary"),  # JSON file (aero coefficients)
                    "input": datasets.Value("binary"),  # JSON file (simulation inputs)
                    "input_mesh": datasets.Value("binary"),  # OBJ mesh file
                    "openfoam_mesh": datasets.Value("binary"),  # OBJ mesh file
                    "pressure_field_mesh": datasets.Value("binary"),  # VTK file
                    "streamlines_mesh": datasets.Value("binary"),  # PLY file
                }
            ),
            homepage="https://inductiva.ai",
        )

    def _split_generators(self, dl_manager):
        """Define the train/validation/test splits.

        The data ships alongside the script, so `dl_manager` is unused and
        each split simply points at its directory.
        """
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"path": "train"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"path": "validation"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"path": "test"}
            ),
        ]

    def _generate_examples(self, path):
        """Yield ``(key, example)`` pairs, one example per sample directory.

        Fix: the previous implementation yielded ``{"label": id}``, which does
        not match the features declared in ``_info`` — `datasets` encodes every
        example against the declared schema, so generation always failed.

        NOTE(review): assumes each sample directory contains one file per
        feature, named after the feature (e.g. ``coeff.json``,
        ``input_mesh.obj``) — confirm against the repository layout.
        """
        feature_names = set(self._info().features)
        key = 0  # avoid shadowing the builtin `id`
        # Sort directories and files so example keys are deterministic
        # across filesystems (os.walk order is otherwise unspecified).
        for root, dirs, files in sorted(os.walk(path)):
            dirs.sort()
            example = {}
            for fname in sorted(files):
                stem = os.path.splitext(fname)[0]
                if stem in feature_names:
                    with open(os.path.join(root, fname), "rb") as fh:
                        example[stem] = fh.read()
            if example:  # skip directories with no recognized artifacts
                yield key, example
                key += 1
|