add batched configuration

- README.md: +67 -7
- quakeflow_nc.py: +112 -32
README.md
CHANGED
- torch (for PyTorch)

### Usage

Import the necessary packages:
```python
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset, DataLoader
from datasets import load_dataset
```
We provide two configurations for the dataset: `NCEDC` and `NCEDC_Batch`. `NCEDC` returns event-based samples (with 10 stations each) one by one, while `NCEDC_Batch` returns event-based samples with a specified batch size and a set of allowed station counts (defaults: `batch_size=16`, `num_stations_list=[5, 10, 20]`). The default configuration is `NCEDC`. You can specify the configuration with the `name` argument. For example:
```python
# load dataset
# ATTENTION: streaming (IterableDataset) is difficult to support because of the nature of HDF5,
# so we recommend loading the dataset directly and converting it into an iterable dataset later.
# The dataset is very large, so the first load will take some time.

# to load "NCEDC"
quakeflow_nc = load_dataset("AI4EPS/quakeflow_nc", split="train")
# or
quakeflow_nc = load_dataset("AI4EPS/quakeflow_nc", name="NCEDC", split="train")

# to load "NCEDC_Batch"
quakeflow_nc = load_dataset("AI4EPS/quakeflow_nc", name="NCEDC_Batch", split="train")
```

If you only want to use the first several shards of the dataset, you can download the script `quakeflow_nc.py` and change the code as below:
```python
# change the 37 to the number of shards you want
_URLS = {
    "NCEDC": [f"{_REPO}/ncedc_event_dataset_{i:03d}.h5" for i in range(37)]
}
```
Then you can use the dataset like this (don't forget to specify the argument `name`):
```python
# don't forget to specify the script path
quakeflow_nc = load_dataset("path_to_script/quakeflow_nc.py", name="NCEDC", split="train")
quakeflow_nc
```

#### Usage for `NCEDC`

Then you can convert the dataset into a PyTorch-style iterable dataset and view the first sample:
```python
quakeflow_nc = load_dataset("AI4EPS/quakeflow_nc", name="NCEDC", split="train")
quakeflow_nc = quakeflow_nc.to_iterable_dataset()
# because example formatting that returns tensors with the "torch" format
# has not been implemented yet, we need to add the formatting manually
quakeflow_nc = quakeflow_nc.map(lambda x: {key: torch.from_numpy(np.array(value, dtype=np.float32)) for key, value in x.items()})

# ... (unchanged lines omitted from this diff) ...

for batch in dataloader:
    for key in batch.keys():
        print(key, batch[key].shape, batch[key].dtype)
    break
```
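
For reference, the feature declarations in `quakeflow_nc.py` fix the per-sample shapes, so a quick check like the following should hold. This is an illustrative sketch, assuming the defaults `nt=8192` and `num_stations=10`; the length-4 comment on `event_location` follows the layout declared for the `NCEDC_Batch` features:

```python
# inspect one sample (illustrative; shapes follow the features declared in quakeflow_nc.py)
sample = next(iter(quakeflow_nc))
print(sample["waveform"].shape)          # torch.Size([3, 8192, 10]) -> (channel, time, station)
print(sample["phase_pick"].shape)        # torch.Size([3, 8192, 10])
print(sample["station_location"].shape)  # torch.Size([10, 3])       -> (station, [lon, lat, -elev_km])
print(sample["event_location"].shape)    # torch.Size([4])
```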

#### Usage for `NCEDC_Batch`

Then you can convert the dataset into a PyTorch-style iterable dataset and view the first sample:
```python
quakeflow_batch = load_dataset("AI4EPS/quakeflow_nc", split="train", name="NCEDC_Batch", batch_size=16, num_stations_list=[5, 10, 20])

# for the PyTorch DataLoader, we need to divide the dataset into several shards
num_workers = 4
quakeflow_batch = quakeflow_batch.to_iterable_dataset(num_shards=num_workers)
# because example formatting that returns tensors with the "torch" format
# has not been implemented yet, we need to add the formatting manually
quakeflow_batch = quakeflow_batch.map(lambda x: {key: torch.from_numpy(np.array(value, dtype=np.float32)) for key, value in x.items()})

def reorder_keys(example):
    example["waveform"] = example["waveform"].permute(1, 2, 3, 0).contiguous()
    example["phase_pick"] = example["phase_pick"].permute(1, 2, 3, 0).contiguous()
    example["station_location"] = example["station_location"].permute(1, 0, 2).contiguous()
    return example

quakeflow_batch = quakeflow_batch.map(reorder_keys)

assert isinstance(quakeflow_batch, torch.utils.data.IterableDataset), "quakeflow_batch is not an IterableDataset"

data_loader = DataLoader(
    quakeflow_batch,
    sampler=None,
    collate_fn=None,
    num_workers=num_workers,
)

for batch in quakeflow_batch:
    print("\nIterable test\n")
    print(batch.keys())
    for key in batch.keys():
        print(key, batch[key].shape, batch[key].dtype)
    break

for batch in data_loader:
    print("\nDataloader test\n")
    print(batch.keys())
    for key in batch.keys():
        # the DataLoader adds an extra leading dimension of size 1; squeeze it out
        batch[key] = batch[key].squeeze(0)
        print(key, batch[key].shape, batch[key].dtype)
    break
```
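
Because the loader script yields whole batches and the `DataLoader` re-batches them with size 1, the squeezed shapes should match the raw iterable ones. Illustratively, assuming `batch_size=16`, `nt=8192`, and a batch drawn from the 10-station group (the station dimension is 5, 10, or 20 depending on the group), the `reorder_keys` permutations above imply:

```python
# expected shapes after reorder_keys (illustrative, not from the original README)
# batch["waveform"].shape         == (16, 3, 8192, 10)  # (batch, channel, time, station)
# batch["phase_pick"].shape       == (16, 3, 8192, 10)
# batch["event_location"].shape   == (16, 4)
# batch["station_location"].shape == (16, 10, 3)
```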
quakeflow_nc.py
CHANGED
import fsspec
from glob import glob
from typing import Dict, List, Optional, Tuple, Union
from collections import defaultdict

import datasets

# ... (unchanged lines omitted from this diff) ...
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_REPO = "https://huggingface.co/datasets/AI4EPS/quakeflow_nc/resolve/main/data"
_URLS = {
    "NCEDC": [f"{_REPO}/ncedc_event_dataset_{i:03d}.h5" for i in range(37)],
    "NCEDC_Batch": [f"{_REPO}/ncedc_event_dataset_{i:03d}.h5" for i in range(37)],
}

class BatchBuilderConfig(datasets.BuilderConfig):
    """
    Batch config for QuakeFlow_NC: yields batches of event-based samples,
    so the number of sampled stations can vary between batches.

    :param batch_size: number of samples in a batch
    :param num_stations_list: possible numbers of stations in a batch
    """
    def __init__(self, batch_size: int, num_stations_list: List, **kwargs):
        super().__init__(**kwargs)
        self.batch_size = batch_size
        self.num_stations_list = num_stations_list
    # batch_size: int = None
    # num_stations_list: List = None
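
# Note (added for clarity; not in the original file): `datasets.load_dataset` forwards extra
# keyword arguments to the selected BuilderConfig, so the README call
#     load_dataset("AI4EPS/quakeflow_nc", name="NCEDC_Batch", batch_size=16, num_stations_list=[5, 10, 20])
# ends up constructing a BatchBuilderConfig with those values.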

# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
    """QuakeFlow_NC: A dataset of earthquake waveforms organized by earthquake events and based on the HDF5 format."""

    degree2km = 111.32
    nt = 8192
    feature_nt = 512
    feature_scale = int(nt / feature_nt)
    sampling_rate = 100.0
    num_stations = 10

    VERSION = datasets.Version("1.1.0")

    # This is an example of a dataset with multiple configurations.

    # ... (unchanged lines omitted from this diff) ...
    # You will be able to load one or the other configurations in the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')

    # default configs; you can change batch_size and num_stations_list when calling `datasets.load_dataset`
    BUILDER_CONFIGS = [
        BatchBuilderConfig(name="NCEDC", version=VERSION, description="yield event-based samples one by one", batch_size=1, num_stations_list=[num_stations]),
        BatchBuilderConfig(name="NCEDC_Batch", version=VERSION, description="yield event-based samples in batches, grouped by the number of sampled stations", batch_size=16, num_stations_list=[5, 10, 20]),
    ]

    DEFAULT_CONFIG_NAME = "NCEDC_Batch"  # It's not mandatory to have a default configuration. Just use one if it makes sense.

    def _info(self):
        # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
        if self.config.name == "NCEDC":
            features = datasets.Features(
                {
                    "waveform": datasets.Array3D(shape=(3, self.nt, self.num_stations), dtype="float32"),
                    "phase_pick": datasets.Array3D(shape=(3, self.nt, self.num_stations), dtype="float32"),
                    "event_location": datasets.Sequence(datasets.Value("float32")),
                    "station_location": datasets.Array2D(shape=(self.num_stations, 3), dtype="float32"),
                }
            )
        elif self.config.name == "NCEDC_Batch":
            # the leading None dimension is the number of stations, which varies between batches
            features = datasets.Features(
                {
                    "waveform": datasets.Array4D(shape=(None, self.config.batch_size, 3, self.nt), dtype="float32"),  # before the permute in _generate_examples: (batch_size, 3, nt, num_stations)
                    "phase_pick": datasets.Array4D(shape=(None, self.config.batch_size, 3, self.nt), dtype="float32"),
                    "event_location": datasets.Array2D(shape=(self.config.batch_size, 4), dtype="float32"),
                    "station_location": datasets.Array3D(shape=(None, self.config.batch_size, 3), dtype="float32"),
                }
            )

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # ... (unchanged lines omitted from this diff) ...
                gen_kwargs={
                    "filepath": files,
                    "split": "train",
                    "batch_size": self.config.batch_size,
                    "num_stations_list": self.config.num_stations_list,
                },
            ),
            # datasets.SplitGenerator(
            # ... (unchanged lines omitted from this diff) ...
            # ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split, batch_size, num_stations_list):
        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
        num_batches = 0

        num_stations_list = np.array(num_stations_list)
        if self.config.name == "NCEDC_Batch":
            # one buffer per station-count group, so only events with the same
            # number of sampled stations are stacked into the same batch
            waveform_buffer_per_group = defaultdict(list)
            phase_pick_buffer_per_group = defaultdict(list)
            event_location_buffer_per_group = defaultdict(list)
            station_location_buffer_per_group = defaultdict(list)

        for file in filepath:
            with h5py.File(file, "r") as fp:
                # ... (unchanged line omitted from this diff) ...
                for event_id in fp.keys():
                    event = fp[event_id]
                    station_ids = list(event.keys())

                    if len(station_ids) < num_stations_list[0]:
                        continue

                    if batch_size == 1 and self.config.name == "NCEDC":
                        num_stations = num_stations_list[0]
                    elif self.config.name == "NCEDC_Batch":
                        # the largest allowed station count that this event can supply
                        num_stations = num_stations_list[num_stations_list <= len(station_ids)][-1]

                    station_ids = np.random.choice(station_ids, num_stations, replace=False)

                    waveforms = np.zeros([3, self.nt, len(station_ids)])
                    phase_pick = np.zeros_like(waveforms)
                    attrs = event.attrs
                    # ... (unchanged lines omitted from this diff) ...

                    for i, sta_id in enumerate(station_ids):
                        # trace_id = event_id + "/" + sta_id
                        waveforms[:, :, i] = event[sta_id][:, :self.nt]
                        attrs = event[sta_id].attrs
                        p_picks = attrs["phase_index"][attrs["phase_type"] == "P"]
                        s_picks = attrs["phase_index"][attrs["phase_type"] == "S"]
                        phase_pick[:, :, i] = generate_label([p_picks, s_picks], nt=self.nt)
                        station_location.append([attrs["longitude"], attrs["latitude"], -attrs["elevation_m"] / 1e3])

                    std = np.std(waveforms, axis=1, keepdims=True)
                    # ... (unchanged line omitted from this diff) ...
                    waveforms = (waveforms - np.mean(waveforms, axis=1, keepdims=True)) / std
                    waveforms = waveforms.astype(np.float32)

                    if batch_size == 1 and self.config.name == "NCEDC":
                        yield event_id, {
                            "waveform": torch.from_numpy(waveforms).float(),
                            "phase_pick": torch.from_numpy(phase_pick).float(),
                            "event_location": event_location,
                            "station_location": station_location,
                        }
                    elif self.config.name == "NCEDC_Batch":
                        waveform_buffer_per_group[num_stations].append(waveforms)
                        phase_pick_buffer_per_group[num_stations].append(phase_pick)
                        event_location_buffer_per_group[num_stations].append(event_location)
                        station_location_buffer_per_group[num_stations].append(station_location)

                        if len(waveform_buffer_per_group[num_stations]) == batch_size:
                            # stack to (batch, 3, nt, station), then permute so the
                            # variable station dimension comes first, matching _info
                            yield num_batches, {
                                "waveform": torch.from_numpy(np.stack(waveform_buffer_per_group[num_stations], axis=0)).float().permute(3, 0, 1, 2),
                                "phase_pick": torch.from_numpy(np.stack(phase_pick_buffer_per_group[num_stations], axis=0)).float().permute(3, 0, 1, 2),
                                "event_location": torch.from_numpy(np.stack(event_location_buffer_per_group[num_stations], axis=0)).float(),
                                "station_location": torch.from_numpy(np.stack(station_location_buffer_per_group[num_stations], axis=0)).float().permute(1, 0, 2),
                            }
                            del waveform_buffer_per_group[num_stations]
                            del phase_pick_buffer_per_group[num_stations]
                            del event_location_buffer_per_group[num_stations]
                            del station_location_buffer_per_group[num_stations]
                            num_batches += 1
                        assert len(waveform_buffer_per_group[num_stations]) < batch_size, "batch size is not correct"

        '''
        # drop_last=False
        if self.config.name=="NCEDC_Batch":
            for num_stations in waveform_buffer_per_group:
                yield event_id, {
                    "waveform": torch.from_numpy(np.stack(waveform_buffer_per_group, axis=0)).float(),
                    "phase_pick": torch.from_numpy(np.stack(phase_pick_buffer_per_group, axis=0)).float(),
                    "event_location": np.stack(event_location_buffer_per_group, axis=0),
                    "station_location": np.stack(station_location_buffer_per_group, axis=0),
                }
                del waveform_buffer_per_group[num_stations]
                del phase_pick_buffer_per_group[num_stations]
                del event_location_buffer_per_group[num_stations]
                del station_location_buffer_per_group[num_stations]
        '''


def generate_label(phase_list, label_width=[150, 150], nt=8192):
    # ... (unchanged lines omitted from this diff) ...
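
The body of `generate_label` is unchanged by this commit, so the diff ends at its signature. For orientation only, here is a hypothetical sketch of what a pick-to-label function with this signature typically computes: a smooth bump of width `label_width` around each pick index, one channel per phase type plus a leftover "noise" channel. This is not the repository's actual implementation.

```python
import numpy as np

def generate_label_sketch(phase_list, label_width=[150, 150], nt=8192):
    # channel 0: "noise" (no phase); channels 1..n: one per phase type (here P, S)
    target = np.zeros([len(phase_list) + 1, nt], dtype=np.float32)
    for i, (picks, width) in enumerate(zip(phase_list, label_width)):
        for pick in picks:
            pick = int(pick)
            lo, hi = max(pick - width // 2, 0), min(pick + width // 2, nt)
            x = np.arange(lo, hi) - pick
            target[i + 1, lo:hi] = np.exp(-(x ** 2) / (2 * (width / 6) ** 2))  # Gaussian bump
    target[0, :] = np.clip(1.0 - target[1:, :].sum(axis=0), 0.0, 1.0)  # leftover probability mass
    return target  # shape (3, nt) for two phase types, matching phase_pick[:, :, i] above
```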