wmgifford committed
Commit d0f5b86 · 1 Parent(s): e9c807b

test model for finetuned case
ttm-r2-etth-finetuned/config.json ADDED
@@ -0,0 +1,63 @@
+ {
+   "_name_or_path": "ibm-granite/granite-timeseries-ttm-r2",
+   "adaptive_patching_levels": 3,
+   "architectures": [
+     "TinyTimeMixerForPrediction"
+   ],
+   "categorical_vocab_size_list": null,
+   "context_length": 512,
+   "d_model": 192,
+   "d_model_scale": 3,
+   "decoder_adaptive_patching_levels": 0,
+   "decoder_d_model": 128,
+   "decoder_d_model_scale": 2,
+   "decoder_mode": "mix_channel",
+   "decoder_num_layers": 2,
+   "decoder_raw_residual": false,
+   "distribution_output": "student_t",
+   "dropout": 0.4,
+   "enable_forecast_channel_mixing": false,
+   "exogenous_channel_indices": [],
+   "expansion_factor": 2,
+   "fcm_context_length": 1,
+   "fcm_gated_attn": true,
+   "fcm_mix_layers": 3,
+   "fcm_prepend_past": true,
+   "fcm_prepend_past_offset": null,
+   "fcm_use_mixer": true,
+   "frequency_token_vocab_size": 5,
+   "gated_attn": true,
+   "head_dropout": 0.4,
+   "init_embed": "pytorch",
+   "init_linear": "pytorch",
+   "init_processing": true,
+   "init_std": 0.02,
+   "loss": "mse",
+   "mode": "common_channel",
+   "model_type": "tinytimemixer",
+   "norm_eps": 1e-05,
+   "norm_mlp": "LayerNorm",
+   "num_input_channels": 7,
+   "num_layers": 2,
+   "num_parallel_samples": 100,
+   "num_patches": 8,
+   "patch_last": true,
+   "patch_length": 64,
+   "patch_stride": 64,
+   "positional_encoding_type": "sincos",
+   "post_init": false,
+   "prediction_channel_indices": [
+     0
+   ],
+   "prediction_filter_length": null,
+   "prediction_length": 96,
+   "resolution_prefix_tuning": false,
+   "scaling": "std",
+   "self_attn": false,
+   "self_attn_heads": 1,
+   "stride_ratio": 1,
+   "torch_dtype": "float32",
+   "transformers_version": "4.46.2",
+   "use_decoder": true,
+   "use_positional_encoding": false
+ }
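
The config above describes a TinyTimeMixer checkpoint fine-tuned from ibm-granite/granite-timeseries-ttm-r2: a 512-step context, a 96-step forecast horizon, 7 input channels, and predictions emitted only for channel 0. A minimal loading sketch with the tsfm_public library follows; the local directory path is an assumption, not part of this commit.

# Minimal loading sketch (assumes tsfm_public is installed and the committed
# files are available locally under ./ttm-r2-etth-finetuned).
from tsfm_public import TinyTimeMixerForPrediction

model = TinyTimeMixerForPrediction.from_pretrained("./ttm-r2-etth-finetuned")

# Key settings recorded in config.json above.
print(model.config.context_length)              # 512
print(model.config.prediction_length)           # 96
print(model.config.num_input_channels)          # 7
print(model.config.prediction_channel_indices)  # [0]
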
ttm-r2-etth-finetuned/generation_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "_from_model_config": true,
+   "transformers_version": "4.46.2"
+ }
ttm-r2-etth-finetuned/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b3c940ae55c2a11edb3e7f94336fc8d945e8d0ba07a9672ee8c2ef66de93509
+ size 3247008
ttm-r2-etth-finetuned/preprocessor_config.json ADDED
@@ -0,0 +1,103 @@
+ {
+   "categorical_encoder": null,
+   "conditional_columns": [
+     "HUFL",
+     "HULL",
+     "MUFL",
+     "MULL",
+     "LUFL",
+     "LULL"
+   ],
+   "context_length": 512,
+   "control_columns": [],
+   "encode_categorical": false,
+   "feature_extractor_type": "TimeSeriesPreprocessor",
+   "freq": "1h",
+   "frequency_mapping": {
+     "10min": 4,
+     "15min": 5,
+     "2min": 2,
+     "30min": 6,
+     "5min": 3,
+     "W": 9,
+     "d": 8,
+     "h": 7,
+     "min": 1,
+     "oov": 0
+   },
+   "id_columns": [],
+   "observable_columns": [],
+   "prediction_length": 96,
+   "processor_class": "TimeSeriesPreprocessor",
+   "scaler_dict": {
+     "0": {
+       "copy": true,
+       "feature_names_in_": [
+         "HUFL",
+         "HULL",
+         "MUFL",
+         "MULL",
+         "LUFL",
+         "LULL"
+       ],
+       "mean_": [
+         7.807025544026027,
+         1.9638457706364187,
+         4.854088594068457,
+         0.7027733450419731,
+         2.9906340412969477,
+         0.7704704349555455
+       ],
+       "n_features_in_": 6,
+       "n_samples_seen_": 10452,
+       "scale_": [
+         6.134403361410595,
+         2.1455700413380594,
+         5.908511495039344,
+         1.9702885996497426,
+         1.2502961279520424,
+         0.6677933692498211
+       ],
+       "var_": [
+         37.63090460048561,
+         4.603470802287402,
+         34.910508087012055,
+         3.8820371659097437,
+         1.5632404075718698,
+         0.4459479840140279
+       ],
+       "with_mean": true,
+       "with_std": true
+     }
+   },
+   "scaler_type": "standard",
+   "scaling": true,
+   "scaling_id_columns": [],
+   "static_categorical_columns": [],
+   "target_columns": [
+     "OT"
+   ],
+   "target_scaler_dict": {
+     "0": {
+       "copy": true,
+       "feature_names_in_": [
+         "OT"
+       ],
+       "mean_": [
+         17.292530528345626
+       ],
+       "n_features_in_": 1,
+       "n_samples_seen_": 10452,
+       "scale_": [
+         8.513664476018814
+       ],
+       "var_": [
+         72.4824828102247
+       ],
+       "with_mean": true,
+       "with_std": true
+     }
+   },
+   "time_series_task": "forecasting",
+   "timestamp_column": "date"
+ }
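
The preprocessor config stores fitted sklearn-style standard-scaler statistics for the six conditional channels and the "OT" target, where scale_ is the square root of var_. A math-only sketch of the normalization it describes, using the target values from target_scaler_dict above (no tsfm calls are assumed):

# Standard scaling as recorded in target_scaler_dict (values copied from above).
import math

mean_ = 17.292530528345626
var_ = 72.4824828102247
scale_ = math.sqrt(var_)          # ~8.5137, matches the stored scale_ value

def scale_ot(x: float) -> float:
    """Standardize a raw OT reading the way the fitted scaler would."""
    return (x - mean_) / scale_

print(scale_ot(mean_))            # 0.0 by construction
print(scale_ot(mean_ + scale_))   # 1.0: one standard deviation above the mean
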
ttm-r2-etth-finetuned/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ba92e917cfe922e58d80dbf596abde0e26f2626659249831a09ac48bcae6ade
+ size 5240
ttm-r2-etth-finetuned/tsfm_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "exogenous_support": true,
+   "maximum_context_length": 512,
+   "maximum_prediction_length": 96,
+   "minimum_context_length": 512,
+   "model_class_name": "TinyTimeMixerForPrediction",
+   "model_config_name": "TinyTimeMixerConfig",
+   "model_type": "tinytimemixer",
+   "module_path": "tsfm_public",
+   "service_handler_class_name": "TinyTimeMixerForecastingHandler",
+   "service_handler_module_path": "tsfminference.tsfm_service_handler"
+ }
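
tsfm_config.json is service metadata for tsfminference: it names the model class, config class, and forecasting handler, and bounds requests to exactly 512 context steps and at most 96 forecast steps. A hedged forward-pass sketch consistent with those bounds follows; the past_values argument and the prediction_outputs field reflect the tsfm_public TinyTimeMixer implementation as commonly documented and may differ across versions.

# Hedged inference sketch (assumes torch and tsfm_public are installed and the
# committed files are available under ./ttm-r2-etth-finetuned; output field
# names may vary between tsfm_public versions).
import torch
from tsfm_public import TinyTimeMixerForPrediction

model = TinyTimeMixerForPrediction.from_pretrained("./ttm-r2-etth-finetuned")
model.eval()

# One dummy window: exactly 512 past steps for all 7 input channels.
past_values = torch.randn(1, 512, 7)

with torch.no_grad():
    out = model(past_values=past_values)

# Expected: a 96-step horizon for the single prediction channel ("OT").
print(out.prediction_outputs.shape)  # assumed torch.Size([1, 96, 1])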