"""Electricity Transformer Temperature (ETT) dataset."""
from dataclasses import dataclass

import pandas as pd

import datasets

_CITATION = """\ |
|
@inproceedings{haoyietal-informer-2021, |
|
author = {Haoyi Zhou and |
|
Shanghang Zhang and |
|
Jieqi Peng and |
|
Shuai Zhang and |
|
Jianxin Li and |
|
Hui Xiong and |
|
Wancai Zhang}, |
|
title = {Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting}, |
|
booktitle = {The Thirty-Fifth {AAAI} Conference on Artificial Intelligence, {AAAI} 2021, Virtual Conference}, |
|
volume = {35}, |
|
number = {12}, |
|
pages = {11106--11115}, |
|
publisher = {{AAAI} Press}, |
|
year = {2021}, |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
The data of Electricity Transformers from two separated counties |
|
in China collected for two years at hourly and 15-min frequencies. |
|
Each data point consists of the target value "oil temperature" and |
|
6 power load features. The train/val/test is 12/4/4 months. |
|
""" |
|
|
|
_HOMEPAGE = "https://github.com/zhouhaoyi/ETDataset"

_LICENSE = "The Creative Commons Attribution 4.0 International License. https://creativecommons.org/licenses/by/4.0/"

_URLS = {
    "h1": "https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/ETT-small/ETTh1.csv",
    "h2": "https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/ETT-small/ETTh2.csv",
    "m1": "https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/ETT-small/ETTm1.csv",
    "m2": "https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/ETT-small/ETTm2.csv",
}
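
# Each CSV has a date index plus seven columns: six power-load covariates
# (HUFL, HULL, MUFL, MULL, LUFL, LULL) and the oil-temperature target "OT";
# see _generate_examples below for how they are consumed.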


@dataclass
class ETTBuilderConfig(datasets.BuilderConfig):
    """ETT builder config."""

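    # prediction_length is the step (and horizon) of the rolling evaluation
    # windows; multivariate switches between a single 7-column target and a
    # univariate "OT" target with the load columns exposed as
    # feat_dynamic_real (see _info and _generate_examples).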
    prediction_length: int = 24
    multivariate: bool = False


class ETT(datasets.GeneratorBasedBuilder):
    """Electricity Transformer Temperature (ETT) dataset"""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        ETTBuilderConfig(
            name="h1",
            version=VERSION,
            description="Time series from first county at hourly frequency.",
        ),
        ETTBuilderConfig(
            name="h2",
            version=VERSION,
            description="Time series from second county at hourly frequency.",
        ),
        ETTBuilderConfig(
            name="m1",
            version=VERSION,
            description="Time series from first county at 15-min frequency.",
        ),
        ETTBuilderConfig(
            name="m2",
            version=VERSION,
            description="Time series from second county at 15-min frequency.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "h1"

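    # The feature schema depends on the config: in multivariate mode the
    # target is a 2-D sequence holding all seven columns of one series, while
    # in the default univariate mode the target is "OT" and the six load
    # series are provided as feat_dynamic_real.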
    def _info(self):
        if self.config.multivariate:
            features = datasets.Features(
                {
                    "start": datasets.Value("timestamp[s]"),
                    "target": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
                    "feat_static_cat": datasets.Sequence(datasets.Value("uint64")),
                    "item_id": datasets.Value("string"),
                }
            )
        else:
            features = datasets.Features(
                {
                    "start": datasets.Value("timestamp[s]"),
                    "target": datasets.Sequence(datasets.Value("float32")),
                    "feat_static_cat": datasets.Sequence(datasets.Value("uint64")),
                    "feat_dynamic_real": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
                    "item_id": datasets.Value("string"),
                }
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS[self.config.name]
        filepath = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": filepath,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": filepath,
                    "split": "test",
                },
            ),
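            # The VALIDATION split is produced with the internal split flag
            # "dev" (see _generate_examples).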
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": filepath,
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        data = pd.read_csv(filepath, parse_dates=True, index_col=0)
        start_date = data.index.min()

        if self.config.name in ["m1", "m2"]:
            factor = 4
        else:
            factor = 1
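        # Split boundaries follow the 12/4/4-month convention, counted as
        # 30-day months at the base hourly resolution; the 15-min configs
        # have 4 rows per hour, hence factor = 4.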
        train_end_date_index = 12 * 30 * 24 * factor

        if split == "dev":
            end_date_index = train_end_date_index + 4 * 30 * 24 * factor
        else:
            end_date_index = train_end_date_index + 8 * 30 * 24 * factor

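        # Validation/test examples are expanding windows stepped by
        # prediction_length: window i contains every value up to
        # train_end_date_index + (i + 1) * prediction_length. The training
        # split is a single series covering the first 12 months.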
        if self.config.multivariate:
            if split in ["test", "dev"]:
                for i, index in enumerate(
                    range(
                        train_end_date_index,
                        end_date_index,
                        self.config.prediction_length,
                    )
                ):
                    yield i, {
                        "start": start_date,
                        "target": data[: index + self.config.prediction_length].values.astype("float32").T,
                        "feat_static_cat": [0],
                        "item_id": "0",
                    }
            else:
                yield 0, {
                    "start": start_date,
                    "target": data[:train_end_date_index].values.astype("float32").T,
                    "feat_static_cat": [0],
                    "item_id": "0",
                }
        else:
            if split in ["test", "dev"]:
                for i, index in enumerate(
                    range(
                        train_end_date_index,
                        end_date_index,
                        self.config.prediction_length,
                    )
                ):
                    target = data["OT"][: index + self.config.prediction_length].values.astype("float32")
                    feat_dynamic_real = data[["HUFL", "HULL", "MUFL", "MULL", "LUFL", "LULL"]][
                        : index + self.config.prediction_length
                    ].values.T.astype("float32")
                    yield i, {
                        "start": start_date,
                        "target": target,
                        "feat_dynamic_real": feat_dynamic_real,
                        "feat_static_cat": [0],
                        "item_id": "OT",
                    }
            else:
                target = data["OT"][:train_end_date_index].values.astype("float32")
                feat_dynamic_real = data[["HUFL", "HULL", "MUFL", "MULL", "LUFL", "LULL"]][
                    :train_end_date_index
                ].values.T.astype("float32")
                yield 0, {
                    "start": start_date,
                    "target": target,
                    "feat_dynamic_real": feat_dynamic_real,
                    "feat_static_cat": [0],
                    "item_id": "OT",
                }
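

if __name__ == "__main__":
    # Minimal usage sketch (not part of the original script): it assumes this
    # file is saved locally as "ett.py" and a `datasets` version that still
    # supports script-based builders. Extra keyword arguments are forwarded
    # to ETTBuilderConfig.
    dataset = datasets.load_dataset(
        "ett.py",
        name="h1",
        prediction_length=24,
        multivariate=False,
    )
    print(dataset)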