Commit a40c87a (1 parent: 9c0aeec), committed by zhuwq0

add separate train and test splits

Files changed (1): quakeflow_nc.py  +80 −36
quakeflow_nc.py CHANGED
@@ -52,7 +52,7 @@ _LICENSE = ""
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _REPO = "https://huggingface.co/datasets/AI4EPS/quakeflow_nc/resolve/main/data"
-_FILENAMES = [
+_FILES = [
     "NC1970-1989.h5",
     "NC1990-1994.h5",
     "NC1995-1999.h5",
@@ -70,10 +70,13 @@ _FILENAMES = [
     "NC2019.h5",
     "NC2020.h5",
 ]
-# _FILENAMES = ["NC2020.h5"]
 _URLS = {
-    "station": [f"{_REPO}/{x}" for x in _FILENAMES],
-    "event": [f"{_REPO}/{x}" for x in _FILENAMES],
+    "station": [f"{_REPO}/{x}" for x in _FILES],
+    "event": [f"{_REPO}/{x}" for x in _FILES],
+    "station_train": [f"{_REPO}/{x}" for x in _FILES[:-1]],
+    "event_train": [f"{_REPO}/{x}" for x in _FILES[:-1]],
+    "station_test": [f"{_REPO}/{x}" for x in _FILES[-1:]],
+    "event_test": [f"{_REPO}/{x}" for x in _FILES[-1:]],
 }
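The new *_train / *_test URL groups split the archives purely by position: every file except the last one feeds the training configs, and the final archive, NC2020.h5, is held out for testing. A minimal sketch of what the comprehensions above evaluate to, with the file list abbreviated for illustration:

_REPO = "https://huggingface.co/datasets/AI4EPS/quakeflow_nc/resolve/main/data"
_FILES = ["NC1970-1989.h5", "NC2019.h5", "NC2020.h5"]  # abbreviated; the script lists every archive

train_urls = [f"{_REPO}/{x}" for x in _FILES[:-1]]  # every archive except NC2020.h5
test_urls = [f"{_REPO}/{x}" for x in _FILES[-1:]]   # only NC2020.h5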
@@ -117,17 +120,39 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
 
     # default config, you can change batch_size and num_stations_list when use `datasets.load_dataset`
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="station", version=VERSION, description="yield station-based samples one by one"),
-        datasets.BuilderConfig(name="event", version=VERSION, description="yield event-based samples one by one"),
+        datasets.BuilderConfig(
+            name="station", version=VERSION, description="yield station-based samples one by one of whole dataset"
+        ),
+        datasets.BuilderConfig(
+            name="event", version=VERSION, description="yield event-based samples one by one of whole dataset"
+        ),
+        datasets.BuilderConfig(
+            name="station_train",
+            version=VERSION,
+            description="yield station-based samples one by one of training dataset",
+        ),
+        datasets.BuilderConfig(
+            name="event_train", version=VERSION, description="yield event-based samples one by one of training dataset"
+        ),
+        datasets.BuilderConfig(
+            name="station_test", version=VERSION, description="yield station-based samples one by one of test dataset"
+        ),
+        datasets.BuilderConfig(
+            name="event_test", version=VERSION, description="yield event-based samples one by one of test dataset"
+        ),
     ]
 
     DEFAULT_CONFIG_NAME = (
-        "station"  # It's not mandatory to have a default configuration. Just use one if it make sense.
+        "station_test"  # It's not mandatory to have a default configuration. Just use one if it make sense.
     )
 
     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-        if self.config.name == "station":
+        if (
+            (self.config.name == "station")
+            or (self.config.name == "station_train")
+            or (self.config.name == "station_test")
+        ):
             features = datasets.Features(
                 {
                     "waveform": datasets.Array2D(shape=(3, self.nt), dtype="float32"),
@@ -137,7 +162,7 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
                 }
             )
 
-        elif self.config.name == "event":
+        elif (self.config.name == "event") or (self.config.name == "event_train") or (self.config.name == "event_test"):
             features = datasets.Features(
                 {
                     "waveform": datasets.Array3D(shape=(None, 3, self.nt), dtype="float32"),
@@ -173,31 +198,42 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
         urls = _URLS[self.config.name]
         # files = dl_manager.download(urls)
         files = dl_manager.download_and_extract(urls)
-        # files = ["./data/ncedc_event_dataset_000.h5"]
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": files[:-1],
-                    "split": "train",
-                },
-            ),
-            # datasets.SplitGenerator(
-            #     name=datasets.Split.VALIDATION,
-            #     # These kwargs will be passed to _generate_examples
-            #     gen_kwargs={
-            #         "filepath": os.path.join(data_dir, "dev.jsonl"),
-            #         "split": "dev",
-            #     },
-            # ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": files[-1:], "split": "test"},
-            ),
-        ]
+        print(files)
+
+        if self.config.name == "station" or self.config.name == "event":
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    # These kwargs will be passed to _generate_examples
+                    gen_kwargs={
+                        "filepath": files[:-1],
+                        "split": "train",
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={"filepath": files[-1:], "split": "test"},
+                ),
+            ]
+        elif self.config.name == "station_train" or self.config.name == "event_train":
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={
+                        "filepath": files,
+                        "split": "train",
+                    },
+                ),
+            ]
+        elif self.config.name == "station_test" or self.config.name == "event_test":
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={"filepath": files, "split": "test"},
+                ),
+            ]
+        else:
+            raise ValueError("config.name is not in BUILDER_CONFIGS")
 
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, filepath, split):
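With this branching, the splits a config exposes and the archives behind them follow directly from the config name. A summary as a plain dict, purely for illustration (SPLITS_BY_CONFIG is not a name used by the script):

SPLITS_BY_CONFIG = {
    "station": ["train", "test"],   # files[:-1] -> train, files[-1:] -> test
    "event": ["train", "test"],
    "station_train": ["train"],     # all downloaded archives are training data
    "event_train": ["train"],
    "station_test": ["test"],       # only the held-out NC2020.h5 archive
    "event_test": ["test"],
}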
@@ -212,7 +248,11 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
                 for event_id in event_ids:
                     event = fp[event_id]
                     station_ids = list(event.keys())
-                    if self.config.name == "station":
+                    if (
+                        (self.config.name == "station")
+                        or (self.config.name == "station_train")
+                        or (self.config.name == "station_test")
+                    ):
                         waveforms = np.zeros([3, self.nt], dtype="float32")
                         phase_pick = np.zeros_like(waveforms)
                         attrs = event.attrs
@@ -239,7 +279,11 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
                             "station_location": torch.from_numpy(np.array(station_location)).float(),
                         }
 
-                    elif self.config.name == "event":
+                    elif (
+                        (self.config.name == "event")
+                        or (self.config.name == "event_train")
+                        or (self.config.name == "event_test")
+                    ):
                         waveforms = np.zeros([len(station_ids), 3, self.nt], dtype="float32")
                         phase_pick = np.zeros_like(waveforms)
                         attrs = event.attrs
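The station-style branch keeps yielding one 3-component trace per sample, while the event-style branch stacks all stations of an event, so the waveform shape is the quickest way to tell the two families apart. A small check, assuming `ds_train` from the loading sketch earlier; the shapes follow the Features definitions in `_info`:

import numpy as np

sample = next(iter(ds_train))
waveform = np.asarray(sample["waveform"])
print(waveform.shape)  # station configs: (3, nt); event configs: (num_stations, 3, nt)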
 
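For a quick sanity check of the new split logic, the loading script can also be pointed at directly instead of the hosted repository; a sketch assuming quakeflow_nc.py sits in the current directory (the *_test configs only download the single held-out archive):

from datasets import load_dataset

# "station_test" resolves to NC2020.h5 only and exposes a single "test" split
ds = load_dataset("./quakeflow_nc.py", name="station_test", split="test")
print(ds)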