gorold committed · Commit 19e62cf · Parent(s): 846afb5
alibaba_cluster_trace_2018/pretrain.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ec2579cc63c5f56c504d91134d5427b1bdff4046e6c1d6bef1a458965087378
+size 1161078571
alibaba_cluster_trace_2018/train_test.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9011da57e1c5e366b04aec17f2734886fa0e52552c26e8df896702004ffdec53
+size 103588580
azure_vm_traces_2017/pretrain.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41030c98d4a0ff52bd80bc0d67b0795e7ea48e5e6df8964a113676bc00e16ef0
+size 9889999837
azure_vm_traces_2017/train_test.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de820360d155f8e24900f5156e8696d9ad727f98f72baaa34bbf7346ce955269
+size 1409364386
borg_cluster_data_2011/pretrain.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e40623398b6db5fdf4b098eeb7873496fdae8b4c20b91ff8d9b4e44fcccba02
+size 3148616998
borg_cluster_data_2011/train_test.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e99c2ba0735ccd2284e22d47aaa4ce43f1251f830d4673019e4e255fbc5d9d2e
+size 357008427
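
Each archive above is added as a Git LFS pointer: oid is the SHA-256 digest of the payload and size its length in bytes. As a side note (not part of this commit), a minimal Python sketch for checking a locally downloaded archive against its pointer, using the alibaba_cluster_trace_2018/pretrain.zip digest from above:

import hashlib

# Sketch only: verify a locally downloaded archive against the sha256
# recorded in its Git LFS pointer. Path and digest are copied from the
# alibaba_cluster_trace_2018/pretrain.zip pointer in this commit.
def sha256_of(path, chunk_size=1 << 20):
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "8ec2579cc63c5f56c504d91134d5427b1bdff4046e6c1d6bef1a458965087378"
assert sha256_of("alibaba_cluster_trace_2018/pretrain.zip") == expected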
cloudops_tsf.py CHANGED
@@ -30,11 +30,6 @@ _CITATION = """\
 }
 """
 
-_URLS = {
-    "azure_vm_traces_2017": "azure_vm_traces_2017.parquet",
-    "borg_cluster_data_2011": "borg_cluster_data_2011.parquet",
-    "alibaba_cluster_trace_2018": "alibaba_cluster_trace_2018.parquet",
-}
 
 _CONFIGS = {
     "azure_vm_traces_2017": {
@@ -139,8 +134,8 @@ Cardinalities = tuple[tuple[str, int], ...]
 
 
 @dataclass
-class AIOpsTSFConfig(datasets.BuilderConfig):
-    """BuilderConfig for AIOpsTSF."""
+class CloudOpsTSFConfig(datasets.BuilderConfig):
+    """BuilderConfig for CloudOpsTSF."""
 
     # load_dataset kwargs
     train_test: bool = field(default=True, init=False)
@@ -198,13 +193,13 @@ class AIOpsTSFConfig(datasets.BuilderConfig):
         return [c[1] for c in self._feat_static_cat_cardinalities[split]]
 
 
-class AIOpsTSF(datasets.ArrowBasedBuilder):
+class CloudOpsTSF(datasets.ArrowBasedBuilder):
     VERSION = datasets.Version("1.0.0")
 
     BUILDER_CONFIGS = []
     for dataset, config in _CONFIGS.items():
         BUILDER_CONFIGS.append(
-            AIOpsTSFConfig(name=dataset, version=VERSION, description="", **config)
+            CloudOpsTSFConfig(name=dataset, version=VERSION, description="", **config)
         )
 
     def _info(self) -> datasets.DatasetInfo:
@@ -272,16 +267,23 @@ class AIOpsTSF(datasets.ArrowBasedBuilder):
         )
 
     def _split_generators(self, dl_manager) -> list[datasets.SplitGenerator]:
-        split = 'train_test' if self.config.train_test else 'pretrain'
-        url = _URLS[self.config.name] + f'/split={split}'
-        downloaded_files = dl_manager.download(url)
-
-        generators = [
-            datasets.SplitGenerator(
-                name=TRAIN_TEST if self.config.train_test else PRETRAIN,
-                gen_kwargs={"filepath": downloaded_files}
+        generators = []
+        if self.config.train_test:
+            downloaded_files = dl_manager.download_and_extract(f"{self.config.name}/train_test.zip")
+            generators.append(
+                datasets.SplitGenerator(
+                    name=TRAIN_TEST if self.config.train_test else PRETRAIN,
+                    gen_kwargs={"filepath": downloaded_files}
+                )
+            )
+        if self.config.pretrain:
+            downloaded_files = dl_manager.download_and_extract(f"{self.config.name}/pretrain.zip")
+            generators.append(
+                datasets.SplitGenerator(
+                    name=PRETRAIN,
+                    gen_kwargs={"filepath": downloaded_files}
+                )
             )
-        ]
         return generators
 
     def _generate_tables(self, filepath: str) -> Iterator[pa.Table]:
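
For reference, a minimal loading sketch (not part of the commit). It assumes the script sits alongside the split archives added above and that load_dataset is pointed at it; the local path below is a placeholder:

from datasets import load_dataset

# Sketch only: the path is a placeholder for wherever this repository is
# checked out; config names are the keys of _CONFIGS, e.g. "azure_vm_traces_2017".
ds = load_dataset("./cloudops_tsf.py", "azure_vm_traces_2017")
# With the config defaults (train_test=True, pretrain unset), _split_generators
# downloads azure_vm_traces_2017/train_test.zip and yields a single split.
print(ds)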