jacobbieker committed
Commit: 01e4e87
Parent(s): 503ebdc

Update for some streaming things

Browse files:
- gfs-reforecast.py  +21 -9
gfs-reforecast.py
CHANGED
@@ -41,7 +41,7 @@ _LICENSE = "US Government data, Open license, no restrictions"
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLS = {
-    "
+    "gfs_v16": "gfs_v16.json",
     "raw": "raw.json",
     "analysis": "analysis.json",
 }
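For context, `_URLS` maps each builder config to a JSON manifest stored alongside the loading script, and `_split_generators` (next hunk) picks it up via `urls = _URLS[self.config.name]`. Below is a minimal sketch of how the new `gfs_v16` entry would be resolved in the non-streaming path, assuming the manifest is a flat JSON list of repo-relative archive paths (an assumption; the diff only shows it being passed to `json.load` and prefixed per file):

    import json
    from datasets import DownloadManager

    _URLS = {
        "gfs_v16": "gfs_v16.json",  # entry added in this commit
        "raw": "raw.json",
        "analysis": "analysis.json",
    }

    def load_manifest(config_name: str, dl_manager: DownloadManager) -> list:
        # download_and_extract caches the manifest locally and returns its path
        manifest_path = dl_manager.download_and_extract(_URLS[config_name])
        with open(manifest_path, "r") as f:
            return json.load(f)  # assumed shape: ["path/to/archive_0.zip", ...]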
@@ -129,41 +129,53 @@ class GFEReforecastDataset(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         urls = _URLS[self.config.name]
-
+        streaming = dl_manager.is_streaming
+        if streaming:
+            urls = dl_manager.download_and_extract(urls)
+        else:
+            with open(filepath, "r") as f:
+                filepaths = json.load(f)
+            data_dir = dl_manager.download_and_extract(filepaths)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": urls,
+                    "filepath": urls if streaming else data_dir,
                     "split": "train",
+                    "streaming": streaming,
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": urls,
+                    "filepath": urls if streaming else data_dir,
                     "split": "test"
+                    "streaming": streaming,
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": urls,
+                    "filepath": urls if streaming else data_dir,
                     "split": "valid",
+                    "streaming": streaming
                 },
             ),
         ]

     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, filepath, split):
+    def _generate_examples(self, filepath, split, streaming):

         # Load the list of files for the type of data
-
-
-
+        if streaming:
+            with open(filepath, "r") as f:
+                filepaths = json.load(f)
+            filepaths = ['zip:///::https://huggingface.co/datasets/openclimatefix/gfs-reforecast/resolve/main/' + f for f in filepaths]
+        else:
+            filepaths = filepath
         if "v16" in self.config.name:
             idx = 0
             for f in filepaths:
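Two consumer-side notes on the streaming changes above. First, the 'zip:///::https://…' prefix in `_generate_examples` builds an fsspec chained URL: the outer `zip://` filesystem is layered over an HTTPS read of the archive from the Hub, so members of each listed zip can be accessed without downloading the whole dataset first. A rough sketch with a hypothetical manifest entry (real entries come from `gfs_v16.json`):

    import fsspec

    rel_path = "example_archive.zip"  # hypothetical; actual entries are listed in gfs_v16.json
    url = ("zip:///::https://huggingface.co/datasets/openclimatefix/gfs-reforecast/resolve/main/"
           + rel_path)

    # fsspec chains the protocols: fetch the zip over HTTPS, then expose its members
    # as a key/value mapping (convenient for zarr-style stores).
    mapper = fsspec.get_mapper(url)
    print(list(mapper)[:5])

Second, the branch on `dl_manager.is_streaming` is what a streaming load exercises; a minimal sketch, assuming the config is registered under the same name as its `_URLS` key:

    from datasets import load_dataset

    # streaming=True hands the builder a StreamingDownloadManager, so
    # dl_manager.is_streaming is True and gen_kwargs carries streaming=True.
    ds = load_dataset("openclimatefix/gfs-reforecast", "gfs_v16", split="train", streaming=True)
    example = next(iter(ds))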