zhuwq0 committed
Commit 01b1ca5
1 Parent(s): 9286e48

update dataset

data/ncedc_event_dataset_000.h5 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9c84dd6df126ceb8c41de0b94bda675a0fc412ff36b39fced966c5fe7f5b3833
- size 449021008
+ oid sha256:2b758181c48df7c688e6f99fefca2169341b7ec930a9ed2edc1ae941dbf00c1e
+ size 5069575680
data/ncedc_event_dataset_001.h5 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b1522e08abe9b56605d12bf3a68f126c0e7efe225025cd0f4bdc2f0693d65891
- size 402210384
+ oid sha256:609060804dc5f1a3361814cd634fb7664284dbfbcde85de61b0ceaf4f3242de5
+ size 3604131920
data/ncedc_event_dataset_002.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55cd70c7fd7701bd065cc7eea3cfc20825bafa7016f71cb9338423f64c16b125
+ size 3767102232
data/ncedc_event_dataset_003.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:182eac5c27e301213e37d9c512167825d64ec102a0a9ac7cf0fe88f3b7401353
+ size 3139007576
data/ncedc_event_dataset_004.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60903a979dc090e700bb07f5191dc62e2641da21d3c4d213850d2e19e1fbda21
+ size 3719907768
data/ncedc_event_dataset_005.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8146e288237b3ef8ea34ea5909785e1a67fde85ce45d8f9107fb1518c287d320
+ size 4490668592
data/ncedc_event_dataset_006.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91f0a764e5e7bd525602c0476999f3d763debe5ed547b9ef3470b7348514401d
+ size 3700374248
data/ncedc_event_dataset_007.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5fb86e9984514947a882eabdd1076d3ad66e8dd395b654212f98c4f724b80721
+ size 3560866384
data/ncedc_event_dataset_008.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3723127460a0a012290371c3432534cbf397e9e67de4d4bb6a5edd347f264629
+ size 3859170376
data/ncedc_event_dataset_009.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:768fd99faa4d1593aa327ff63a32b460a42ea8d198085db059485465d767438c
+ size 4055526392
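
The .h5 entries above are Git LFS pointer files: only the sha256 oid and byte size live in the repository, while the multi-gigabyte HDF5 shards themselves are resolved from LFS storage. According to dataset_script.py below, each shard is expected to contain one HDF5 group per event_id and, inside it, one dataset per station_id holding a (3, 1200) float32 waveform. The following is a minimal, unverified sketch for fetching a single shard and inspecting that layout; the repo id zhuwq/QuakeFlow_NC is taken from the _REPO URL in the script, and huggingface_hub is assumed to be installed:

import h5py
from huggingface_hub import hf_hub_download

# Assumption: the dataset repo id matches the _REPO URL in dataset_script.py.
# Note: shard 000 is roughly 5 GB, so this download is only worthwhile if the
# raw HDF5 layout is actually needed.
path = hf_hub_download(
    repo_id="zhuwq/QuakeFlow_NC",
    repo_type="dataset",
    filename="data/ncedc_event_dataset_000.h5",
)

with h5py.File(path, "r") as fp:
    event_ids = sorted(fp.keys())
    print(f"{len(event_ids)} events in this shard")
    event = fp[event_ids[0]]
    for station_id in sorted(event.keys()):
        waveform = event[station_id][:]  # expected shape (3, 1200), per the loader script
        print(event_ids[0], station_id, waveform.shape)
        break
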
dataset_script.py ADDED
@@ -0,0 +1,163 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # TODO: Address all TODOs and remove all explanatory comments
+ """TODO: Add a description here."""
+
+
+ import csv
+ import json
+ import os
+ import h5py
+ from glob import glob
+
+ import datasets
+
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {A great new dataset},
+ author={huggingface, Inc.
+ },
+ year={2020}
+ }
+ """
+
+ # TODO: Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """\
+ This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = ""
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _REPO = "https://huggingface.co/datasets/zhuwq/QuakeFlow_NC/resolve/main/data"
+ _URLS = {
+     "NCEDC": [
+         f"{_REPO}/ncedc_event_dataset_{i:03d}.h5"
+         for i in range(2)
+     ]
+ }
+
+
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class NewDataset(datasets.GeneratorBasedBuilder):
+     """TODO: Short description of my dataset."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the datasets with configurable options
+     # You can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or the other configurations in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     # data = datasets.load_dataset('my_dataset', 'second_domain')
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="NCEDC", version=VERSION, description="This part of my dataset covers a first domain"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "NCEDC"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+
+     def _info(self):
+         # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
+         features = datasets.Features(
+             {
+                 "event_id": datasets.Value("string"),
+                 "station_id": datasets.Value("string"),
+                 "waveform": datasets.Array2D(shape=(3, 1200), dtype="float32"),
+             }
+         )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,  # Here we define them above because they are different between the two configurations
+             # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
+             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
+         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
+         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+         urls = _URLS[self.config.name]
+         files = dl_manager.download_and_extract(urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": files,
+                     "split": "train",
+                 },
+             ),
+             # datasets.SplitGenerator(
+             #     name=datasets.Split.VALIDATION,
+             #     # These kwargs will be passed to _generate_examples
+             #     gen_kwargs={
+             #         "filepath": os.path.join(data_dir, "dev.jsonl"),
+             #         "split": "dev",
+             #     },
+             # ),
+             # datasets.SplitGenerator(
+             #     name=datasets.Split.TEST,
+             #     # These kwargs will be passed to _generate_examples
+             #     gen_kwargs={
+             #         "filepath": os.path.join(data_dir, "test.jsonl"),
+             #         "split": "test"
+             #     },
+             # ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepath, split):
+         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+
+         for file in filepath:
+             with h5py.File(file, "r") as fp:
+                 for event_id in sorted(list(fp.keys())):
+                     event = fp[event_id]
+                     for station_id in sorted(list(event.keys())):
+                         station = event[station_id]
+                         # print(f"{event_id = } {station_id = }")
+                         yield event_id + "_" + station_id, {
+                             "event_id": event_id,
+                             "station_id": station_id,
+                             "waveform": station[:],
+                         }
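
With this script at the repository root, the dataset can be loaded through the standard datasets API. A minimal usage sketch, assuming the repository id zhuwq/QuakeFlow_NC from _REPO and the "NCEDC" config defined above; recent datasets releases additionally require trust_remote_code=True for script-based datasets:

from datasets import load_dataset

# Assumption: the repo id matches _REPO in dataset_script.py.
ds = load_dataset(
    "zhuwq/QuakeFlow_NC",
    name="NCEDC",             # the single config in BUILDER_CONFIGS
    split="train",            # the only split produced by _split_generators
    # trust_remote_code=True, # needed on newer `datasets` versions
)

print(ds)  # features: event_id, station_id, waveform (3 x 1200 float32)
example = ds[0]
print(example["event_id"], example["station_id"], len(example["waveform"]))

Note that, as committed, _URLS only enumerates the first two shards (range(2)), so a load with this script downloads ncedc_event_dataset_000.h5 and 001 only; the range would need to cover all ten files added in this commit to expose the full dataset.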