wmgifford committed
Commit 8305341
1 Parent(s): d0f5b86

example with future exogenous
ttm-r2-etth-finetuned-control/config.json ADDED
@@ -0,0 +1,70 @@
+ {
+   "_name_or_path": "ibm-granite/granite-timeseries-ttm-r2",
+   "adaptive_patching_levels": 3,
+   "architectures": [
+     "TinyTimeMixerForPrediction"
+   ],
+   "categorical_vocab_size_list": null,
+   "context_length": 512,
+   "d_model": 192,
+   "d_model_scale": 3,
+   "decoder_adaptive_patching_levels": 0,
+   "decoder_d_model": 128,
+   "decoder_d_model_scale": 2,
+   "decoder_mode": "mix_channel",
+   "decoder_num_layers": 2,
+   "decoder_raw_residual": false,
+   "distribution_output": "student_t",
+   "dropout": 0.4,
+   "enable_forecast_channel_mixing": false,
+   "exogenous_channel_indices": [
+     1,
+     2,
+     3,
+     4,
+     5,
+     6
+   ],
+   "expansion_factor": 2,
+   "fcm_context_length": 1,
+   "fcm_gated_attn": true,
+   "fcm_mix_layers": 3,
+   "fcm_prepend_past": true,
+   "fcm_prepend_past_offset": null,
+   "fcm_use_mixer": true,
+   "frequency_token_vocab_size": 5,
+   "gated_attn": true,
+   "head_dropout": 0.4,
+   "init_embed": "pytorch",
+   "init_linear": "pytorch",
+   "init_processing": true,
+   "init_std": 0.02,
+   "loss": "mse",
+   "mode": "common_channel",
+   "model_type": "tinytimemixer",
+   "norm_eps": 1e-05,
+   "norm_mlp": "LayerNorm",
+   "num_input_channels": 7,
+   "num_layers": 2,
+   "num_parallel_samples": 100,
+   "num_patches": 8,
+   "patch_last": true,
+   "patch_length": 64,
+   "patch_stride": 64,
+   "positional_encoding_type": "sincos",
+   "post_init": false,
+   "prediction_channel_indices": [
+     0
+   ],
+   "prediction_filter_length": null,
+   "prediction_length": 96,
+   "resolution_prefix_tuning": false,
+   "scaling": "std",
+   "self_attn": false,
+   "self_attn_heads": 1,
+   "stride_ratio": 1,
+   "torch_dtype": "float32",
+   "transformers_version": "4.46.2",
+   "use_decoder": true,
+   "use_positional_encoding": false
+ }
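
The configuration above forecasts channel 0 (the target) and lists channels 1-6 as exogenous inputs. As a minimal sketch, assuming the tsfm_public (granite-tsfm) package is installed and the files in this commit have been downloaded locally, the fine-tuned checkpoint can be loaded and its channel layout inspected as follows; the local directory path is illustrative, not part of this commit.

from tsfm_public import TinyTimeMixerForPrediction

# Assumption: the files above live in ./ttm-r2-etth-finetuned-control
model = TinyTimeMixerForPrediction.from_pretrained("ttm-r2-etth-finetuned-control")

# Matches the config above: forecast the target channel, condition on the rest.
print(model.config.prediction_channel_indices)  # [0]
print(model.config.exogenous_channel_indices)   # [1, 2, 3, 4, 5, 6]
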
ttm-r2-etth-finetuned-control/generation_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "_from_model_config": true,
+   "transformers_version": "4.46.2"
+ }
ttm-r2-etth-finetuned-control/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4918ebc47cd468280fa8a9db42a4d7285e206526a8a9d2b2d1c78182472ab9ff
+ size 3247008
ttm-r2-etth-finetuned-control/preprocessor_config.json ADDED
@@ -0,0 +1,103 @@
+ {
+   "categorical_encoder": null,
+   "conditional_columns": [],
+   "context_length": 512,
+   "control_columns": [
+     "HUFL",
+     "HULL",
+     "MUFL",
+     "MULL",
+     "LUFL",
+     "LULL"
+   ],
+   "encode_categorical": false,
+   "feature_extractor_type": "TimeSeriesPreprocessor",
+   "freq": "1h",
+   "frequency_mapping": {
+     "10min": 4,
+     "15min": 5,
+     "2min": 2,
+     "30min": 6,
+     "5min": 3,
+     "W": 9,
+     "d": 8,
+     "h": 7,
+     "min": 1,
+     "oov": 0
+   },
+   "id_columns": [],
+   "observable_columns": [],
+   "prediction_length": 96,
+   "processor_class": "TimeSeriesPreprocessor",
+   "scaler_dict": {
+     "0": {
+       "copy": true,
+       "feature_names_in_": [
+         "HUFL",
+         "HULL",
+         "MUFL",
+         "MULL",
+         "LUFL",
+         "LULL"
+       ],
+       "mean_": [
+         7.807025544026027,
+         1.9638457706364187,
+         4.854088594068457,
+         0.7027733450419731,
+         2.9906340412969477,
+         0.7704704349555455
+       ],
+       "n_features_in_": 6,
+       "n_samples_seen_": 10452,
+       "scale_": [
+         6.134403361410595,
+         2.1455700413380594,
+         5.908511495039344,
+         1.9702885996497426,
+         1.2502961279520424,
+         0.6677933692498211
+       ],
+       "var_": [
+         37.63090460048561,
+         4.603470802287402,
+         34.910508087012055,
+         3.8820371659097437,
+         1.5632404075718698,
+         0.4459479840140279
+       ],
+       "with_mean": true,
+       "with_std": true
+     }
+   },
+   "scaler_type": "standard",
+   "scaling": true,
+   "scaling_id_columns": [],
+   "static_categorical_columns": [],
+   "target_columns": [
+     "OT"
+   ],
+   "target_scaler_dict": {
+     "0": {
+       "copy": true,
+       "feature_names_in_": [
+         "OT"
+       ],
+       "mean_": [
+         17.292530528345626
+       ],
+       "n_features_in_": 1,
+       "n_samples_seen_": 10452,
+       "scale_": [
+         8.513664476018814
+       ],
+       "var_": [
+         72.4824828102247
+       ],
+       "with_mean": true,
+       "with_std": true
+     }
+   },
+   "time_series_task": "forecasting",
+   "timestamp_column": "date"
+ }
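
This preprocessor treats OT as the forecast target and the six load columns as control columns whose future values are supplied at inference time, with standard scalers fitted on 10,452 training rows. A hedged sketch of reusing it follows, assuming the granite-tsfm TimeSeriesPreprocessor API and a local ETTh1.csv with the column layout above; both the file path and the method names are assumptions for illustration.

import pandas as pd
from tsfm_public import TimeSeriesPreprocessor

# Assumption: the preprocessor config above is in ./ttm-r2-etth-finetuned-control
tsp = TimeSeriesPreprocessor.from_pretrained("ttm-r2-etth-finetuned-control")

# Assumption: ETTh1.csv has columns date, HUFL, HULL, MUFL, MULL, LUFL, LULL, OT
df = pd.read_csv("ETTh1.csv", parse_dates=["date"])

# Applies the stored standard scalers to the target and control columns.
scaled = tsp.preprocess(df)
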
ttm-r2-etth-finetuned-control/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ba92e917cfe922e58d80dbf596abde0e26f2626659249831a09ac48bcae6ade
+ size 5240
ttm-r2-etth-finetuned-control/tsfm_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "exogenous_support": true,
+   "is_finetuned": true,
+   "maximum_context_length": 512,
+   "maximum_prediction_length": 96,
+   "minimum_context_length": 512,
+   "model_class_name": "TinyTimeMixerForPrediction",
+   "model_config_name": "TinyTimeMixerConfig",
+   "model_type": "tinytimemixer",
+   "module_path": "tsfm_public",
+   "service_handler_class_name": "TinyTimeMixerForecastingHandler",
+   "service_handler_module_path": "tsfminference.tsfm_service_handler"
+ }
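
tsfm_config.json records the serving limits (context length 512, prediction length 96) alongside fully qualified class names that an inference service can resolve at runtime. The sketch below only illustrates that resolution pattern using the standard library; it is not the actual tsfminference handler code, and the local path is an assumption.

import importlib
import json

with open("ttm-r2-etth-finetuned-control/tsfm_config.json") as f:
    cfg = json.load(f)

# Resolve the model class named in the config (tsfm_public.TinyTimeMixerForPrediction).
module = importlib.import_module(cfg["module_path"])
model_cls = getattr(module, cfg["model_class_name"])
model = model_cls.from_pretrained("ttm-r2-etth-finetuned-control")

# The recorded limits are consistent with the fine-tuned model's settings.
assert model.config.context_length <= cfg["maximum_context_length"]
assert model.config.prediction_length <= cfg["maximum_prediction_length"]
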