# %%
import numpy as np
from datasets import load_dataset
from torch.utils.data import DataLoader

# quakeflow_nc = load_dataset("AI4EPS/quakeflow_nc", name="station_test", split="test")
quakeflow_nc = load_dataset(
    "./quakeflow_nc.py",
    name="station_test",
    # name="event_test",
    split="test",
    download_mode="force_redownload",
)

# print the first sample of the iterable dataset
for example in quakeflow_nc:
    print("\nIterable test\n")
    print(example.keys())
    for key in example.keys():
        if key == "data":
            print(key, np.array(example[key]).shape)
        else:
            print(key, example[key])
    break
# %%
# convert the dataset to PyTorch format and read it with a DataLoader
# (collate_fn=lambda x: x keeps each batch as a plain list of sample dicts)
quakeflow_nc = quakeflow_nc.with_format("torch")
dataloader = DataLoader(quakeflow_nc, batch_size=8, num_workers=0, collate_fn=lambda x: x)

for batch in dataloader:
    print("\nDataloader test\n")
    print(f"Batch size: {len(batch)}")
    print(batch[0].keys())
    for key in batch[0].keys():
        if key == "data":
            print(key, np.array(batch[0][key]).shape)
        else:
            print(key, batch[0][key])
    break

# %%
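# Optional: a custom collate_fn can stack the waveforms into a single batch
# tensor instead of returning a list of per-sample dicts. This is a minimal
# sketch, not part of the dataset itself; the name `stack_collate` is made up
# here, and it assumes every sample's "data" array in a batch has the same
# shape (which may not hold across configurations).
import torch


def stack_collate(samples):
    batch = {"data": torch.stack([torch.as_tensor(s["data"]) for s in samples])}
    for key in samples[0].keys():
        if key != "data":
            # keep non-waveform fields as plain Python lists
            batch[key] = [s[key] for s in samples]
    return batch


dataloader = DataLoader(quakeflow_nc, batch_size=8, num_workers=0, collate_fn=stack_collate)
for batch in dataloader:
    print("\nCustom collate test\n")
    print("data", batch["data"].shape)
    break

# %%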