Commit 846afb5 by gorold (1 parent: 3b7465d)

add loading script

Files changed (1):
  1. cloudops_tsf.py +304 -0

cloudops_tsf.py ADDED
@@ -0,0 +1,304 @@
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from functools import cached_property
from typing import Iterator, Optional

import datasets
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from gluonts.dataset.field_names import FieldName

_CITATION = """\
@article{woo2023pushing,
  title={Pushing the Limits of Pre-training for Time Series Forecasting in the CloudOps Domain},
  author={Woo, Gerald and Liu, Chenghao and Kumar, Akshat and Sahoo, Doyen},
  journal={arXiv preprint arXiv:2310.05063},
  year={2023}
}
"""

_URLS = {
    "azure_vm_traces_2017": "azure_vm_traces_2017.parquet",
    "borg_cluster_data_2011": "borg_cluster_data_2011.parquet",
    "alibaba_cluster_trace_2018": "alibaba_cluster_trace_2018.parquet",
}
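
# Each _URLS entry points at a parquet dataset directory rather than a single
# file: _split_generators below appends "/split=<train_test|pretrain>" to the
# path to select a partition.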

_CONFIGS = {
    "azure_vm_traces_2017": {
        "optional_fields": (
            FieldName.FEAT_STATIC_CAT,
            FieldName.FEAT_STATIC_REAL,
            FieldName.PAST_FEAT_DYNAMIC_REAL,
        ),
        "prediction_length": 48,
        "freq": "5T",
        "stride": 48,
        "univariate": True,
        "multivariate": False,
        "rolling_evaluations": 12,
        "test_split_date": pd.Period(
            year=2016, month=12, day=13, hour=15, minute=55, freq="5T"
        ),
        "_feat_static_cat_cardinalities": {
            "train_test": (
                ("vm_id", 17568),
                ("subscription_id", 2713),
                ("deployment_id", 3255),
                ("vm_category", 3),
            ),
            "pretrain": (
                ("vm_id", 177040),
                ("subscription_id", 5514),
                ("deployment_id", 15208),
                ("vm_category", 3),
            ),
        },
        "target_dim": 1,
        "feat_static_real_dim": 3,
        "past_feat_dynamic_real_dim": 2,
    },
    "borg_cluster_data_2011": {
        "optional_fields": (
            FieldName.FEAT_STATIC_CAT,
            FieldName.PAST_FEAT_DYNAMIC_REAL,
        ),
        "prediction_length": 48,
        "freq": "5T",
        "stride": 48,
        "univariate": False,
        "multivariate": True,
        "rolling_evaluations": 12,
        "test_split_date": pd.Period(
            year=2011, month=5, day=28, hour=18, minute=55, freq="5T"
        ),
        "_feat_static_cat_cardinalities": {
            "train_test": (
                ("job_id", 850),
                ("task_id", 11117),
                ("user", 282),
                ("scheduling_class", 4),
                ("logical_job_name", 718),
            ),
            "pretrain": (
                ("job_id", 6072),
                ("task_id", 154503),
                ("user", 518),
                ("scheduling_class", 4),
                ("logical_job_name", 3899),
            ),
        },
        "target_dim": 2,
        "past_feat_dynamic_real_dim": 5,
    },
    "alibaba_cluster_trace_2018": {
        "optional_fields": (
            FieldName.FEAT_STATIC_CAT,
            FieldName.PAST_FEAT_DYNAMIC_REAL,
        ),
        "prediction_length": 48,
        "freq": "5T",
        "stride": 48,
        "univariate": False,
        "multivariate": True,
        "rolling_evaluations": 12,
        "test_split_date": pd.Period(
            year=2018, month=1, day=8, hour=11, minute=55, freq="5T"
        ),
        "_feat_static_cat_cardinalities": {
            "train_test": (
                ("container_id", 6048),
                ("app_du", 1292),
            ),
            "pretrain": (
                ("container_id", 64457),
                ("app_du", 9484),
            ),
        },
        "target_dim": 2,
        "past_feat_dynamic_real_dim": 6,
    },
}
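
# One plausible reading of the evaluation settings above (a sketch, not stated
# in this script): each config defines rolling_evaluations=12 test windows that
# start at test_split_date and advance by stride=48 steps (4 hours at "5T"):
#   start = pd.Period(year=2016, month=12, day=13, hour=15, minute=55, freq="5T")
#   window_starts = [start + i * 48 for i in range(12)]  # forecast start per window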

PRETRAIN = datasets.splits.NamedSplit("pretrain")
TRAIN_TEST = datasets.splits.NamedSplit("train_test")

Cardinalities = tuple[tuple[str, int], ...]


@dataclass
class AIOpsTSFConfig(datasets.BuilderConfig):
    """BuilderConfig for AIOpsTSF."""

    # load_dataset kwargs
    train_test: bool = field(default=True, init=False)
    pretrain: bool = field(default=False, init=False)
    _include_metadata: tuple[str, ...] = field(default_factory=tuple, init=False)

    # builder kwargs
    prediction_length: int = field(default=None)
    freq: str = field(default=None)
    stride: int = field(default=None)
    univariate: bool = field(default=None)
    multivariate: bool = field(default=None)
    optional_fields: tuple[str, ...] = field(default=None)
    rolling_evaluations: int = field(default=None)
    test_split_date: pd.Period = field(default=None)
    _feat_static_cat_cardinalities: dict[str, Cardinalities] = field(
        default_factory=dict
    )
    target_dim: int = field(default=1)
    feat_static_real_dim: int = field(default=0)
    past_feat_dynamic_real_dim: int = field(default=0)

    METADATA = [
        "freq",
        "prediction_length",
        "stride",
        "rolling_evaluations",
    ]

    @property
    def include_metadata(self) -> tuple[str, ...]:
        return self._include_metadata

    @include_metadata.setter
    def include_metadata(self, value: tuple[str, ...]):
        assert all([v in self.METADATA for v in value]), (
            f"Metadata: {value} is not supported, each item should be one of"
            f" {self.METADATA}"
        )
        self._include_metadata = value

    @cached_property
    def feat_static_cat_cardinalities(self) -> Optional[list[int]]:
        if FieldName.FEAT_STATIC_CAT not in self.optional_fields:
            return None

        if self.pretrain:
            split = "pretrain"
        elif self.train_test:
            split = "train_test"
        else:
            raise ValueError(
                "At least one of `train_test` and `pretrain` should be True"
            )
        return [c[1] for c in self._feat_static_cat_cardinalities[split]]
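
# Illustrative use of the config (a sketch; it mirrors how BUILDER_CONFIGS
# below constructs configs, with include_metadata guarded by the setter above):
#   cfg = AIOpsTSFConfig(name="azure_vm_traces_2017", **_CONFIGS["azure_vm_traces_2017"])
#   cfg.include_metadata = ("freq", "stride")  # ok: both appear in METADATA
#   cfg.include_metadata = ("foo",)            # raises AssertionError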


class AIOpsTSF(datasets.ArrowBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = []
    for dataset, config in _CONFIGS.items():
        BUILDER_CONFIGS.append(
            AIOpsTSFConfig(name=dataset, version=VERSION, description="", **config)
        )

    def _info(self) -> datasets.DatasetInfo:
        def sequence_feature(dtype: str, univar: bool) -> datasets.Sequence:
            if univar:
                return datasets.Sequence(datasets.Value(dtype))
            return datasets.Sequence(datasets.Sequence(datasets.Value(dtype)))

        features = {
            FieldName.START: datasets.Value("timestamp[s]"),
            FieldName.TARGET: sequence_feature("float32", self.config.univariate),
            FieldName.ITEM_ID: datasets.Value("string"),
        }

        CAT_FEATS = (
            FieldName.FEAT_STATIC_CAT,
            FieldName.FEAT_DYNAMIC_CAT,
            FieldName.PAST_FEAT_DYNAMIC,
        )
        REAL_FEATS = (
            FieldName.FEAT_STATIC_REAL,
            FieldName.FEAT_DYNAMIC_REAL,
            FieldName.PAST_FEAT_DYNAMIC_REAL,
        )
        STATIC_FEATS = (FieldName.FEAT_STATIC_CAT, FieldName.FEAT_STATIC_REAL)
        DYNAMIC_FEATS = (
            FieldName.FEAT_DYNAMIC_CAT,
            FieldName.FEAT_DYNAMIC_REAL,
            FieldName.PAST_FEAT_DYNAMIC,
            FieldName.PAST_FEAT_DYNAMIC_REAL,
        )

        for ts_field in self.config.optional_fields:
            # Determine field dtype
            if ts_field in CAT_FEATS:
                dtype = "int32"
            elif ts_field in REAL_FEATS:
                dtype = "float32"
            else:
                raise ValueError(f"Invalid field: {ts_field}")

            # Determine field shape
            if ts_field in STATIC_FEATS:
                univar = True
            elif ts_field in DYNAMIC_FEATS:
                univar = False
            else:
                raise ValueError(f"Invalid field: {ts_field}")

            features[ts_field] = sequence_feature(dtype, univar)

        for metadata in self.config.include_metadata:
            if metadata == "freq":
                features[metadata] = datasets.Value("string")
            elif metadata in ("prediction_length", "stride", "rolling_evaluations"):
                features[metadata] = datasets.Value("int32")
            else:
                raise ValueError(f"Invalid metadata: {metadata}")

        features = datasets.Features(features)

        return datasets.DatasetInfo(
            features=features,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> list[datasets.SplitGenerator]:
        split = "train_test" if self.config.train_test else "pretrain"
        url = _URLS[self.config.name] + f"/split={split}"
        downloaded_files = dl_manager.download(url)

        generators = [
            datasets.SplitGenerator(
                name=TRAIN_TEST if self.config.train_test else PRETRAIN,
                gen_kwargs={"filepath": downloaded_files},
            )
        ]
        return generators
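
    # _generate_tables streams Arrow record batches straight from the parquet
    # file and, when include_metadata is set, widens each batch with
    # constant-valued freq/prediction_length/rolling_evaluations/stride columns.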
    def _generate_tables(self, filepath: str) -> Iterator[pa.Table]:
        table = pq.read_table(filepath)

        for batch in table.to_batches():
            columns = batch.columns
            schema = batch.schema
            if self.config.include_metadata:
                freq = pa.array([self.config.freq] * len(batch))
                prediction_length = pa.array(
                    [self.config.prediction_length] * len(batch)
                )
                rolling_evaluations = pa.array(
                    [self.config.rolling_evaluations] * len(batch)
                )
                stride = pa.array([self.config.stride] * len(batch))
                columns += [freq, prediction_length, rolling_evaluations, stride]
                for pa_field in [
                    pa.field("freq", pa.string()),
                    pa.field("prediction_length", pa.int32()),
                    pa.field("rolling_evaluations", pa.int32()),
                    pa.field("stride", pa.int32()),
                ]:
                    schema = schema.append(pa_field)
            yield batch[FieldName.ITEM_ID].to_pylist(), pa.Table.from_arrays(
                columns, schema=schema
            )
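
A minimal loading sketch (assumptions: the script is saved locally as
cloudops_tsf.py next to the parquet data, and newer versions of datasets may
additionally require trust_remote_code=True):

    from datasets import load_dataset

    ds = load_dataset(
        "./cloudops_tsf.py",                # hypothetical local path to this script
        "azure_vm_traces_2017",             # any key of _CONFIGS
        include_metadata=("freq", "prediction_length"),
    )
    example = ds["train_test"][0]           # start/target/item_id plus requested metadata
    print(example["freq"], example["prediction_length"])  # "5T" 48

Setting pretrain=True together with train_test=False in the same call should
instead yield the single "pretrain" split, per _split_generators above.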