Merge branch 'main' of hf.co:datasets/AI4EPS/quakeflow_nc
Files changed:
- README.md (+34, -60)
- example.py (+41, -0)
- quakeflow_nc.py (+28, -21)
README.md
CHANGED
@@ -66,7 +66,7 @@ Waveform data, metadata, or data products for this study were accessed through t
 - datasets
 - h5py
 - fsspec
+- pytorch
 
 ### Usage
 Import the necessary packages:
@@ -74,7 +74,6 @@ Import the necessary packages:
 import h5py
 import numpy as np
 import torch
-from torch.utils.data import Dataset, IterableDataset, DataLoader
 from datasets import load_dataset
 ```
 We have 6 configurations for the dataset:
@@ -89,16 +88,28 @@ We have 6 configurations for the dataset:
 
 The sample of `station` is a dictionary with the following keys:
 - `data`: the waveform with shape `(3, nt)`, the default time length is 8192
+- `begin_time`: the begin time of the waveform data
+- `end_time`: the end time of the waveform data
+- `phase_time`: the phase arrival time
+- `phase_index`: the time point index of the phase arrival time
+- `phase_type`: the phase type
+- `phase_polarity`: the phase polarity in ('U', 'D', 'N')
+- `event_time`: the event time
+- `event_time_index`: the time point index of the event time
+- `event_location`: the event location with shape `(3,)`, including latitude, longitude, depth
 - `station_location`: the station location with shape `(3,)`, including latitude, longitude and depth
 
 The sample of `event` is a dictionary with the following keys:
 - `data`: the waveform with shape `(n_station, 3, nt)`, the default time length is 8192
+- `begin_time`: the begin time of the waveform data
+- `end_time`: the end time of the waveform data
+- `phase_time`: the phase arrival time with shape `(n_station,)`
+- `phase_index`: the time point index of the phase arrival time with shape `(n_station,)`
+- `phase_type`: the phase type with shape `(n_station,)`
+- `phase_polarity`: the phase polarity in ('U', 'D', 'N') with shape `(n_station,)`
+- `event_time`: the event time
+- `event_time_index`: the time point index of the event time
+- `event_location`: the space-time coordinates of the event with shape `(n_station, 3)`
 - `station_location`: the space coordinates of the station with shape `(n_station, 3)`, including latitude, longitude and depth
 
 The default configuration is `station_test`. You can specify the configuration by argument `name`. For example:
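The expanded key lists above can be checked against a loaded sample with a short sketch like the following; this is an editorial illustration rather than part of the committed README, and it assumes the `station_test` configuration loads as documented:

```python
import numpy as np
from datasets import load_dataset

# Editorial sketch: print the documented fields of one `station_test` sample.
quakeflow_nc = load_dataset("AI4EPS/quakeflow_nc", name="station_test", split="test")
sample = next(iter(quakeflow_nc))

print(np.array(sample["data"]).shape)                 # (3, nt), nt defaults to 8192
print(sample["begin_time"], sample["end_time"])       # time window of the waveform
print(sample["phase_type"], sample["phase_index"])    # pick labels and their sample indices
print(sample["phase_polarity"])                       # 'U', 'D', or 'N' per pick
print(sample["event_time"], sample["event_time_index"])
print(sample["event_location"], sample["station_location"])  # 3-element location vectors
```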
@@ -117,70 +128,33 @@ quakeflow_nc = load_dataset("AI4EPS/quakeflow_nc", name="station_test", split="t
 quakeflow_nc = load_dataset("AI4EPS/quakeflow_nc", name="event", split="train")
 ```
 
-####
-Then you can change the dataset into PyTorch format iterable dataset, and view the first sample:
+#### Example loading the dataset
 ```python
 quakeflow_nc = load_dataset("AI4EPS/quakeflow_nc", name="station_test", split="test")
-# for PyTorch DataLoader, we need to divide the dataset into several shards
-num_workers=4
-quakeflow_nc = quakeflow_nc.to_iterable_dataset(num_shards=num_workers)
-# because add examples formatting to get tensors when using the "torch" format
-# has not been implemented yet, we need to manually add the formatting when using iterable dataset
-# if you want to use dataset directly, just use
-# quakeflow_nc.with_format("torch")
-quakeflow_nc = quakeflow_nc.map(lambda x: {key: torch.from_numpy(np.array(value, dtype=np.float32)) for key, value in x.items()})
-try:
-    isinstance(quakeflow_nc, torch.utils.data.IterableDataset)
-except:
-    raise Exception("quakeflow_nc is not an IterableDataset")
 
 # print the first sample of the iterable dataset
 for example in quakeflow_nc:
     print("\nIterable test\n")
     print(example.keys())
     for key in example.keys():
+        if key == "data":
+            print(key, np.array(example[key]).shape)
+        else:
+            print(key, example[key])
     break
 
+# %%
+quakeflow_nc = quakeflow_nc.with_format("torch")
+dataloader = DataLoader(quakeflow_nc, batch_size=8, num_workers=0, collate_fn=lambda x: x)
 
 for batch in dataloader:
     print("\nDataloader test\n")
-    print(batch
+    print(f"Batch size: {len(batch)}")
+    print(batch[0].keys())
+    for key in batch[0].keys():
+        if key == "data":
+            print(key, np.array(batch[0][key]).shape)
+        else:
+            print(key, batch[0][key])
-Then you can change the dataset into PyTorch format dataset, and view the first sample (Don't forget to reorder the keys):
-```python
-quakeflow_nc = datasets.load_dataset("AI4EPS/quakeflow_nc", split="test", name="event_test")
-
-# for PyTorch DataLoader, we need to divide the dataset into several shards
-num_workers=4
-quakeflow_nc = quakeflow_nc.to_iterable_dataset(num_shards=num_workers)
-quakeflow_nc = quakeflow_nc.map(lambda x: {key: torch.from_numpy(np.array(value, dtype=np.float32)) for key, value in x.items()})
-try:
-    isinstance(quakeflow_nc, torch.utils.data.IterableDataset)
-except:
-    raise Exception("quakeflow_nc is not an IterableDataset")
-
-# print the first sample of the iterable dataset
-for example in quakeflow_nc:
-    print("\nIterable test\n")
-    print(example.keys())
-    for key in example.keys():
-        print(key, example[key].shape, example[key].dtype)
-        break
-
-dataloader = DataLoader(quakeflow_nc, batch_size=1, num_workers=num_workers)
-
-for batch in dataloader:
-    print("\nDataloader test\n")
-    print(batch.keys())
-    for key in batch.keys():
-        print(key, batch[key].shape, batch[key].dtype)
     break
 ```
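One caveat on the updated snippet above: it still uses `np` and `DataLoader`, while the import block earlier in the README no longer pulls in `torch.utils.data`. A self-contained variant with the needed imports (an editorial sketch, not part of the commit) would look like:

```python
import numpy as np
from datasets import load_dataset
from torch.utils.data import DataLoader

quakeflow_nc = load_dataset("AI4EPS/quakeflow_nc", name="station_test", split="test")
quakeflow_nc = quakeflow_nc.with_format("torch")

# collate_fn=lambda x: x keeps each batch as a plain list of sample dicts, which avoids
# collating the variable-length pick fields (phase_time, phase_type, ...) into tensors.
dataloader = DataLoader(quakeflow_nc, batch_size=8, num_workers=0, collate_fn=lambda x: x)

batch = next(iter(dataloader))
print(len(batch), batch[0]["data"].shape)  # number of samples in the batch, and one waveform shape
```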
example.py
ADDED
@@ -0,0 +1,41 @@
+# %%
+import numpy as np
+from datasets import load_dataset
+from torch.utils.data import DataLoader
+
+# quakeflow_nc = load_dataset("AI4EPS/quakeflow_nc", name="station_test", split="test")
+quakeflow_nc = load_dataset(
+    "./quakeflow_nc.py",
+    name="station_test",
+    # name="event_test",
+    split="test",
+    download_mode="force_redownload",
+)
+
+# print the first sample of the iterable dataset
+for example in quakeflow_nc:
+    print("\nIterable test\n")
+    print(example.keys())
+    for key in example.keys():
+        if key == "data":
+            print(key, np.array(example[key]).shape)
+        else:
+            print(key, example[key])
+    break
+
+# %%
+quakeflow_nc = quakeflow_nc.with_format("torch")
+dataloader = DataLoader(quakeflow_nc, batch_size=8, num_workers=0, collate_fn=lambda x: x)
+
+for batch in dataloader:
+    print("\nDataloader test\n")
+    print(f"Batch size: {len(batch)}")
+    print(batch[0].keys())
+    for key in batch[0].keys():
+        if key == "data":
+            print(key, np.array(batch[0][key]).shape)
+        else:
+            print(key, batch[0][key])
+    break
+
+# %%
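For the event-based configuration that `example.py` only hints at through the commented-out `name="event_test"` argument, an analogous sketch (editorial, not part of the added file) could look like this; the shapes follow the README documentation:

```python
import numpy as np
from datasets import load_dataset

# Editorial sketch: same pattern as example.py, but for the event-based configuration.
quakeflow_nc = load_dataset("./quakeflow_nc.py", name="event_test", split="test")

for example in quakeflow_nc:
    data = np.array(example["data"])                     # (n_station, 3, nt)
    print("stations:", data.shape[0], "waveform shape:", data.shape)
    print("picks per station:", [len(p) for p in example["phase_type"]])
    print("station locations:", example["station_location"])
    break
```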
quakeflow_nc.py
CHANGED
@@ -50,7 +50,7 @@ _LICENSE = ""
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_REPO = "https://huggingface.co/datasets/AI4EPS/quakeflow_nc/resolve/main/
+_REPO = "https://huggingface.co/datasets/AI4EPS/quakeflow_nc/resolve/main/waveform_h5"
 _FILES = [
     "1987.h5",
     "1988.h5",
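`_URLS` is referenced later in `_split_generators` but its definition is outside the hunks shown here; presumably it combines `_REPO` with the entries of `_FILES` and splits them by configuration. A rough sketch of that assumption (hypothetical names and split, not taken from the script):

```python
# Hypothetical reconstruction of how _URLS could be assembled from _REPO and _FILES;
# the actual definition is not shown in this diff.
_REPO = "https://huggingface.co/datasets/AI4EPS/quakeflow_nc/resolve/main/waveform_h5"
_FILES = ["1987.h5", "1988.h5"]  # truncated here; the full list continues in the script
_URLS = {
    "station": [f"{_REPO}/{f}" for f in _FILES],
    "event": [f"{_REPO}/{f}" for f in _FILES],
}
print(_URLS["station"][0])
```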
@@ -104,14 +104,10 @@ class BatchBuilderConfig(datasets.BuilderConfig):
     """
     yield a batch of event-based sample, so the number of sample stations can vary among batches
     Batch Config for QuakeFlow_NC
-    :param batch_size: number of samples in a batch
-    :param num_stations_list: possible number of stations in a batch
     """
 
-    def __init__(self,
+    def __init__(self, **kwargs):
         super().__init__(**kwargs)
-        self.batch_size = batch_size
-        self.num_stations_list = num_stations_list
 
 
 # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
@@ -172,13 +168,14 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
             features = datasets.Features(
                 {
                     "data": datasets.Array2D(shape=(3, self.nt), dtype="float32"),
-                    "phase_pick": datasets.Array2D(shape=(3, self.nt), dtype="float32"),
                     "phase_time": datasets.Sequence(datasets.Value("string")),
                     "phase_index": datasets.Sequence(datasets.Value("int32")),
                     "phase_type": datasets.Sequence(datasets.Value("string")),
                     "phase_polarity": datasets.Sequence(datasets.Value("string")),
                     "begin_time": datasets.Value("string"),
                     "end_time": datasets.Value("string"),
+                    "event_time": datasets.Value("string"),
+                    "event_time_index": datasets.Value("int32"),
                     "event_location": datasets.Sequence(datasets.Value("float32")),
                     "station_location": datasets.Sequence(datasets.Value("float32")),
                 },
@@ -186,18 +183,21 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
         elif (self.config.name == "event") or (self.config.name == "event_train") or (self.config.name == "event_test"):
             features = datasets.Features(
                 {
-                    "phase_polarity": datasets.Sequence(datasets.Value("string")),
+                    "data": datasets.Array3D(shape=(None, 3, self.nt), dtype="float32"),
+                    "phase_time": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
+                    "phase_index": datasets.Sequence(datasets.Sequence(datasets.Value("int32"))),
+                    "phase_type": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
+                    "phase_polarity": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
                     "begin_time": datasets.Value("string"),
                     "end_time": datasets.Value("string"),
+                    "event_time": datasets.Value("string"),
+                    "event_time_index": datasets.Value("int32"),
                     "event_location": datasets.Sequence(datasets.Value("float32")),
-                    "station_location": datasets.Sequence(datasets.Value("float32")),
+                    "station_location": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
                 },
             )
+        else:
+            raise ValueError(f"config.name = {self.config.name} is not in BUILDER_CONFIGS")
 
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
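To make the nested `Sequence` schema above concrete, an `event` example would be structured roughly as follows; this is an editorial illustration with placeholder values, assuming two stations and the configured time length `nt`:

```python
import numpy as np

# Placeholder values only; the structure mirrors the "event" Features defined above.
n_station, nt = 2, 8192
example = {
    "data": np.zeros((n_station, 3, nt), dtype="float32"),
    "phase_time": [["t_p1"], ["t_p2"]],          # one list of pick times per station
    "phase_index": [[100], [200]],               # matching sample indices
    "phase_type": [["P"], ["S"]],
    "phase_polarity": [["U"], ["N"]],
    "begin_time": "t_begin",
    "end_time": "t_end",
    "event_time": "t_event",
    "event_time_index": 100,
    "event_location": [0.0, 0.0, 0.0],           # longitude, latitude, depth_km per the generator
    "station_location": [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
}
```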
@@ -224,7 +224,8 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         urls = _URLS[self.config.name]
         # files = dl_manager.download(urls)
-        files = dl_manager.download_and_extract(urls)
+        # files = dl_manager.download_and_extract(urls)
+        files = ["waveform_h5/1989.h5", "waveform_h5/1990.h5"]
         print(files)
 
         if self.config.name == "station" or self.config.name == "event":
@@ -280,9 +281,12 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
                     event_attrs["longitude"],
                     event_attrs["latitude"],
                     event_attrs["depth_km"],
-                    event_attrs["event_time_index"],
                 ]
+                event_time = event_attrs["event_time"]
+                event_time_index = event_attrs["event_time_index"]
                 station_ids = list(event.keys())
+                if len(station_ids) == 0:
+                    continue
                 if (
                     (self.config.name == "station")
                     or (self.config.name == "station_train")
@@ -307,6 +311,8 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
                         "phase_polarity": phase_polarity,
                         "begin_time": begin_time,
                         "end_time": end_time,
+                        "event_time": event_time,
+                        "event_time_index": event_time_index,
                         "event_location": event_location,
                         "station_location": station_location,
                     }
@@ -327,14 +333,13 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
                 for i, sta_id in enumerate(station_ids):
                     waveforms[i, :, : self.nt] = event[sta_id][:, : self.nt]
                     attrs = event[sta_id].attrs
-                    phase_type.append(attrs["phase_type"])
-                    phase_time.append(attrs["phase_time"])
-                    phase_index.append(attrs["phase_index"])
-                    phase_polarity.append(attrs["phase_polarity"])
+                    phase_type.append(list(attrs["phase_type"]))
+                    phase_time.append(list(attrs["phase_time"]))
+                    phase_index.append(list(attrs["phase_index"]))
+                    phase_polarity.append(list(attrs["phase_polarity"]))
                     station_location.append(
                         [attrs["longitude"], attrs["latitude"], -attrs["elevation_m"] / 1e3]
                     )
-
                 yield event_id, {
                     "data": waveforms,
                     "phase_time": phase_time,
@@ -343,6 +348,8 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
                     "phase_polarity": phase_polarity,
                     "begin_time": begin_time,
                     "end_time": end_time,
+                    "event_time": event_time,
+                    "event_time_index": event_time_index,
                     "event_location": event_location,
                     "station_location": station_location,
                 }
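Since `_generate_examples` reads the yearly HDF5 files directly, the attributes it expects can also be inspected with plain `h5py`; the sketch below is editorial and assumes the layout implied by the generator code (one group per event, one waveform dataset per station):

```python
import h5py

with h5py.File("waveform_h5/1990.h5", "r") as fp:
    event_id = list(fp.keys())[0]
    event = fp[event_id]
    print(dict(event.attrs))        # expects longitude, latitude, depth_km, event_time, event_time_index, ...
    for sta_id in event.keys():
        trace = event[sta_id]
        print(sta_id, trace.shape)  # per-station waveform, (3, n_samples)
        print(dict(trace.attrs))    # expects phase_type, phase_time, phase_index, phase_polarity, elevation_m, ...
        break
```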