seismic network update #2
by kylewhy · opened

Files changed:
- README.md +130 -0
- ncedc_event_dataset_000.h5.txt +0 -0
- quakeflow_nc.py +71 -14
README.md CHANGED
@@ -1,3 +1,133 @@
---
license: mit
---

# Quakeflow_NC

## Introduction

This dataset is part of the data from NCEDC (Northern California Earthquake Data Center) and is organised as several HDF5 files. The dataset structure is shown below. (The file [ncedc_event_dataset_000.h5.txt](./ncedc_event_dataset_000.h5.txt) shows the structure of the first shard of the dataset, and you can find more information about the format at [AI4EPS](https://ai4eps.github.io/homepage/ml4earth/seismic_event_format1/).)

```
Group: / len:10000
|- Group: /nc100012 len:5
| |-* begin_time = 1987-05-08T00:15:48.890
| |-* depth_km = 7.04
| |-* end_time = 1987-05-08T00:17:48.890
| |-* event_id = nc100012
| |-* event_time = 1987-05-08T00:16:14.700
| |-* event_time_index = 2581
| |-* latitude = 37.5423
| |-* longitude = -118.4412
| |-* magnitude = 1.1
| |-* magnitude_type = D
| |-* num_stations = 5
| |- Dataset: /nc100012/NC.MRS..EH (shape:(3, 12000))
| | |- (dtype=float32)
| | | |-* azimuth = 265.0
| | | |-* component = ['Z']
| | | |-* distance_km = 39.1
| | | |-* dt_s = 0.01
| | | |-* elevation_m = 3680.0
| | | |-* emergence_angle = 93.0
| | | |-* event_id = ['nc100012' 'nc100012']
| | | |-* latitude = 37.5107
| | | |-* location =
| | | |-* longitude = -118.8822
| | | |-* network = NC
| | | |-* phase_index = [3274 3802]
| | | |-* phase_polarity = ['U' 'N']
| | | |-* phase_remark = ['IP' 'S']
| | | |-* phase_score = [1 1]
| | | |-* phase_time = ['1987-05-08T00:16:21.630' '1987-05-08T00:16:26.920']
| | | |-* phase_type = ['P' 'S']
| | | |-* snr = [0. 0. 1.98844361]
| | | |-* station = MRS
| | | |-* unit = 1e-6m/s
| |- Dataset: /nc100012/NN.BEN.N1.EH (shape:(3, 12000))
| | |- (dtype=float32)
| | | |-* azimuth = 329.0
| | | |-* component = ['Z']
| | | |-* distance_km = 22.5
| | | |-* dt_s = 0.01
| | | |-* elevation_m = 2476.0
| | | |-* emergence_angle = 102.0
| | | |-* event_id = ['nc100012' 'nc100012']
| | | |-* latitude = 37.7154
| | | |-* location = N1
| | | |-* longitude = -118.5741
| | | |-* network = NN
| | | |-* phase_index = [3010 3330]
| | | |-* phase_polarity = ['U' 'N']
| | | |-* phase_remark = ['IP' 'S']
| | | |-* phase_score = [0 0]
| | | |-* phase_time = ['1987-05-08T00:16:18.990' '1987-05-08T00:16:22.190']
| | | |-* phase_type = ['P' 'S']
| | | |-* snr = [0. 0. 7.31356192]
| | | |-* station = BEN
| | | |-* unit = 1e-6m/s
......
```
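
If you want to inspect a shard directly with `h5py` (outside of the `datasets` loader), a minimal sketch is shown below. It assumes a local copy of `ncedc_event_dataset_000.h5` downloaded from this repository:

```python
import h5py

# Minimal sketch: walk the first event of a locally downloaded shard.
with h5py.File("ncedc_event_dataset_000.h5", "r") as fp:
    for event_id in fp:
        event = fp[event_id]
        print(event_id, event.attrs["event_time"], event.attrs["magnitude"])
        for sta_id, trace in event.items():
            # each trace is a (3, 12000) float32 array; picks are stored as attributes
            print(" ", sta_id, trace.shape, trace.attrs["phase_type"], trace.attrs["phase_index"])
        break  # only the first event
```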

## How to use

### Requirements
- datasets
- h5py
- torch (for PyTorch)
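
These can be installed with pip, for example:

```bash
pip install datasets h5py torch
```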

### Usage
```python
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset, DataLoader
import datasets

# load the dataset
# ATTENTION: streaming (IterableDataset) is difficult to support because of how HDF5 files are read,
# so we recommend loading the dataset directly and converting it to an iterable dataset later
# the dataset is very large, so the first load will take a while
quakeflow_nc = datasets.load_dataset("AI4EPS/quakeflow_nc", split="train")
quakeflow_nc
```
If you want to use the first several shards of the dataset, you can download the script `quakeflow_nc.py` and change the code below:
```python
# change the 37 to the number of shards you want
_URLS = {
    "NCEDC": [f"{_REPO}/ncedc_event_dataset_{i:03d}.h5" for i in range(37)]
}
```
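For example, to keep only the first three shards:
```python
_URLS = {
    "NCEDC": [f"{_REPO}/ncedc_event_dataset_{i:03d}.h5" for i in range(3)]
}
```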
Then you can use the dataset like this:
```python
quakeflow_nc = datasets.load_dataset("./quakeflow_nc.py", split="train")
quakeflow_nc
```
Then you can convert the dataset into a PyTorch iterable dataset and view the first sample:
```python
quakeflow_nc = quakeflow_nc.to_iterable_dataset()
quakeflow_nc = quakeflow_nc.with_format("torch")
# formatting examples as tensors with the "torch" format has not been
# implemented for iterable datasets yet, so we add the formatting manually
quakeflow_nc = quakeflow_nc.map(lambda x: {key: torch.from_numpy(np.array(value, dtype=np.float32)) for key, value in x.items()})
assert isinstance(quakeflow_nc, torch.utils.data.IterableDataset), "quakeflow_nc is not an IterableDataset"

# print the first sample of the iterable dataset
for example in quakeflow_nc:
    print("\nIterable test\n")
    print(example.keys())
    for key in example.keys():
        print(key, example[key].shape, example[key].dtype)
    break

dataloader = DataLoader(quakeflow_nc, batch_size=4)

# print the first batch from the dataloader
for batch in dataloader:
    print("\nDataloader test\n")
    print(batch.keys())
    for key in batch.keys():
        print(key, batch[key].shape, batch[key].dtype)
    break
```
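
The batches can then be fed straight into a model. Below is a minimal, purely illustrative training step; the `nn.Conv2d` stand-in model is a hypothetical placeholder (not part of this dataset) that simply maps the `(batch, 3, nt, num_stations)` waveform tensor to pick scores of the same shape:

```python
import torch
import torch.nn as nn

# hypothetical stand-in for a real phase picker: a 1x1 convolution
model = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

for batch in dataloader:
    optimizer.zero_grad()
    pred = model(batch["waveform"])  # (batch, 3, nt, num_stations)
    loss = nn.functional.mse_loss(pred, batch["phase_pick"])
    loss.backward()
    optimizer.step()
    print(loss.item())
    break  # a single illustrative step
```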
ncedc_event_dataset_000.h5.txt ADDED
The diff for this file is too large to render. See raw diff.
quakeflow_nc.py CHANGED
```diff
@@ -21,7 +21,11 @@ import csv
 import json
 import os
 import h5py
+import numpy as np
+import torch
+import fsspec
 from glob import glob
+from typing import Dict, List, Optional, Tuple, Union
 
 import datasets
 
@@ -52,7 +56,7 @@ _LICENSE = ""
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_REPO = "https://huggingface.co/datasets/AI4EPS/
+_REPO = "https://huggingface.co/datasets/AI4EPS/quakeflow_nc/resolve/main/data"
 _URLS = {
     "NCEDC": [f"{_REPO}/ncedc_event_dataset_{i:03d}.h5" for i in range(37)]
 }
@@ -85,9 +89,10 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
         features=datasets.Features(
             {
-                "
-                "
-                "
+                "waveform": datasets.Array3D(shape=(3, self.nt, self.num_stations), dtype='float32'),
+                "phase_pick": datasets.Array3D(shape=(3, self.nt, self.num_stations), dtype='float32'),
+                "event_location": [datasets.Value("float32")],
+                "station_location": datasets.Array2D(shape=(self.num_stations, 3), dtype="float32"),
             }
         )
         return datasets.DatasetInfo(
@@ -144,21 +149,73 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
             # },
             # ),
         ]
+
+    degree2km = 111.32
+    nt = 8192
+    feature_nt = 512
+    feature_scale = int(nt / feature_nt)
+    sampling_rate = 100.0
+    num_stations = 10
 
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, filepath, split):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-
+        num_stations = self.num_stations
+
         for file in filepath:
             with h5py.File(file, "r") as fp:
-                for event_id in sorted(list(fp.keys())):
+                # for event_id in sorted(list(fp.keys())):
+                for event_id in fp.keys():
                     event = fp[event_id]
-
-
-
-
-
-
-
-
+                    station_ids = list(event.keys())
+                    if len(station_ids) < num_stations:
+                        continue
+                    else:
+                        station_ids = np.random.choice(station_ids, num_stations, replace=False)
+
+                    waveforms = np.zeros([3, self.nt, len(station_ids)])
+                    phase_pick = np.zeros_like(waveforms)
+                    attrs = event.attrs
+                    event_location = [attrs["longitude"], attrs["latitude"], attrs["depth_km"], attrs["event_time_index"]]
+                    station_location = []
+
+                    for i, sta_id in enumerate(station_ids):
+                        # trace_id = event_id + "/" + sta_id
+                        waveforms[:, :, i] = event[sta_id][:, :self.nt]
+                        attrs = event[sta_id].attrs
+                        p_picks = attrs["phase_index"][attrs["phase_type"] == "P"]
+                        s_picks = attrs["phase_index"][attrs["phase_type"] == "S"]
+                        phase_pick[:, :, i] = generate_label([p_picks, s_picks], nt=self.nt)
+
+                        station_location.append([attrs["longitude"], attrs["latitude"], -attrs["elevation_m"] / 1e3])
+
+                    std = np.std(waveforms, axis=1, keepdims=True)
+                    std[std == 0] = 1.0
+                    waveforms = (waveforms - np.mean(waveforms, axis=1, keepdims=True)) / std
+                    waveforms = waveforms.astype(np.float32)
+
+                    yield event_id, {
+                        "waveform": torch.from_numpy(waveforms).float(),
+                        "phase_pick": torch.from_numpy(phase_pick).float(),
+                        "event_location": event_location,
+                        "station_location": station_location,
+                    }
+
+
+def generate_label(phase_list, label_width=[150, 150], nt=8192):
+
+    target = np.zeros([len(phase_list) + 1, nt], dtype=np.float32)
+
+    for i, (picks, w) in enumerate(zip(phase_list, label_width)):
+        for phase_time in picks:
+            t = np.arange(nt) - phase_time
+            gaussian = np.exp(-(t**2) / (2 * (w / 6) ** 2))
+            gaussian[gaussian < 0.1] = 0.0
+            target[i + 1, :] += gaussian
+
+    target[0:1, :] = np.maximum(0, 1 - np.sum(target[1:, :], axis=0, keepdims=True))
+
+    return target
```
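
As a quick sanity check of the `generate_label` function added above (the body is copied verbatim from the diff), one P pick and one S pick produce three label channels: a noise channel plus one Gaussian per phase:

```python
import numpy as np

# copied verbatim from the new generate_label in quakeflow_nc.py
def generate_label(phase_list, label_width=[150, 150], nt=8192):
    target = np.zeros([len(phase_list) + 1, nt], dtype=np.float32)
    for i, (picks, w) in enumerate(zip(phase_list, label_width)):
        for phase_time in picks:
            t = np.arange(nt) - phase_time
            gaussian = np.exp(-(t**2) / (2 * (w / 6) ** 2))
            gaussian[gaussian < 0.1] = 0.0
            target[i + 1, :] += gaussian
    target[0:1, :] = np.maximum(0, 1 - np.sum(target[1:, :], axis=0, keepdims=True))
    return target

# one P pick at sample 3000 and one S pick at sample 3500
label = generate_label([[3000], [3500]])
print(label.shape)     # (3, 8192): [noise, P, S]
print(label[1, 3000])  # 1.0 at the P pick
print(label[2, 3500])  # 1.0 at the S pick
print(label[0, 3000])  # 0.0: noise channel is the complement of the picks
```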