philippds committed on
Commit
3e4288c
1 Parent(s): e32bb59

Upload 15 files

Agent.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:acc018812ef7f8f3b1bb4c12b81cf3450fee35ff95e453528161bb47fcf9d408
+ size 883467
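
Note: the three `+` lines above are a Git LFS pointer, not the network itself. The repository stores only the pointer spec version, the SHA-256 of the payload, and its byte size; the 883467-byte ONNX file is fetched on demand. A minimal sketch for resolving the pointer through the Hub follows; the `repo_id` is an assumption inferred from the model card name below, not stated in this commit:

```python
# Minimal sketch, assuming the huggingface_hub package and the repo id below
# (inferred from the model card name; not stated in this commit).
from huggingface_hub import hf_hub_download

# hf_hub_download resolves the LFS pointer and returns a local path
# to the actual 883467-byte ONNX payload.
onnx_path = hf_hub_download(
    repo_id="philippds/hivex-AWS-PPO-baseline-task-2-difficulty-1",  # assumption
    filename="Agent.onnx",
)
print(onnx_path)
```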
Agent/Agent-1499754.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88cbf3968a112cf64630e135c5ea854ee6cee27b06ea73077148421afecfb97a
+ size 883467
Agent/Agent-1499754.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aedd6ec7be7ec67e0388d600986b24e9dc435427e10c091f1f3a9a2ce35caf07
+ size 5302191
Agent/Agent-1802754.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:acc018812ef7f8f3b1bb4c12b81cf3450fee35ff95e453528161bb47fcf9d408
+ size 883467
Agent/Agent-1802754.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32be43d1eb0d8acc0ee485244805059e505e22691df0f1205c753a2ecc98e12b
+ size 5302191
Agent/Agent-497304.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4e906b8b068a36bb1c67f416e93070d133b36b7dc45f5dd9a85939e80be76d9
+ size 883467
Agent/Agent-497304.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:609a5bb24e34dbdabda0f81ee8045f8c8d15f83470c847db8019e78af630eee1
+ size 5302191
Agent/Agent-998372.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ee569c3aaade89444bfe742fe09b4538de983fcedf3aa3ecf210bfc464a92ba
+ size 883467
Agent/Agent-998372.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea8d5d92c87f32f2e4fc85f273ff46d68ef2f8e5d596ae06e89857c6834daa9e
+ size 5302191
Agent/checkpoint.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32be43d1eb0d8acc0ee485244805059e505e22691df0f1205c753a2ecc98e12b
+ size 5302191
Agent/events.out.tfevents.1718245819.RICHARD.21080.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19ccd42eb88ede5f34c9f4cf933851ba3af644fa2ede2495153411c356f963db
+ size 1495809
README.md ADDED
@@ -0,0 +1,74 @@
+ ---
+ library_name: hivex
+ original_train_name: AerialWildfireSuppression_difficulty_1_task_2_run_id_1_train
+ tags:
+ - hivex
+ - hivex-aerial-wildfire-suppression
+ - reinforcement-learning
+ - multi-agent-reinforcement-learning
+ model-index:
+ - name: hivex-AWS-PPO-baseline-task-2-difficulty-1
+   results:
+   - task:
+       type: sub-task
+       name: maximize_preparing_non_burning_trees
+       task-id: 2
+       difficulty-id: 1
+     dataset:
+       name: hivex-aerial-wildfire-suppression
+       type: hivex-aerial-wildfire-suppression
+     metrics:
+     - type: crash_count
+       value: 0.10833333656191826 +/- 0.13545462085140364
+       name: Crash Count
+       verified: true
+     - type: extinguishing_trees
+       value: 12.764999979734421 +/- 20.904079059070074
+       name: Extinguishing Trees
+       verified: true
+     - type: extinguishing_trees_reward
+       value: 63.82500011920929 +/- 104.52039570112963
+       name: Extinguishing Trees Reward
+       verified: true
+     - type: fire_out
+       value: 0.46333333626389506 +/- 0.3753984235292453
+       name: Fire Out
+       verified: true
+     - type: fire_too_close_to_city
+       value: 0.7550000011920929 +/- 0.37763111996880777
+       name: Fire too Close to City
+       verified: true
+     - type: preparing_trees
+       value: 561.5350036621094 +/- 325.15177018123586
+       name: Preparing Trees
+       verified: true
+     - type: preparing_trees_reward
+       value: 2807.674984741211 +/- 1625.7588563088673
+       name: Preparing Trees Reward
+       verified: true
+     - type: water_drop
+       value: 19.77666656970978 +/- 9.682256516548964
+       name: Water Drop
+       verified: true
+     - type: water_pickup
+       value: 19.191666650772095 +/- 9.573244118078943
+       name: Water Pickup
+       verified: true
+     - type: cumulative_reward
+       value: 3287.165838623047 +/- 1376.9107350360173
+       name: Cumulative Reward
+       verified: true
+ ---
+
+ This model serves as the baseline for the **Aerial Wildfire Suppression** environment, trained and tested on task <code>2</code> with difficulty <code>1</code> using the Proximal Policy Optimization (PPO) algorithm.<br><br>
+
+ Environment: **Aerial Wildfire Suppression**<br>
+ Task: <code>2</code><br>
+ Difficulty: <code>1</code><br>
+ Algorithm: <code>PPO</code><br>
+ Episode Length: <code>3000</code><br>
+ Training <code>max_steps</code>: <code>1800000</code><br>
+ Testing <code>max_steps</code>: <code>180000</code><br><br>
+
+ Train & Test [Scripts](https://github.com/hivex-research/hivex)<br>
+ Download the [Environment](https://github.com/hivex-research/hivex-environments)
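
Note: as a usage sketch for the card above, here is a minimal example of loading the uploaded `Agent.onnx` with `onnxruntime`. The observation layout is not documented in this commit, so the sketch only queries the exported input/output signature rather than assuming it:

```python
# Minimal sketch, assuming the onnxruntime package is installed.
import onnxruntime as ort

# Load the exported policy network.
session = ort.InferenceSession("Agent.onnx")

# ML-Agents exports name their inputs/outputs; print them instead of guessing.
for inp in session.get_inputs():
    print("input:", inp.name, inp.shape, inp.type)
for out in session.get_outputs():
    print("output:", out.name, out.shape)
```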
configuration.yaml ADDED
@@ -0,0 +1,94 @@
+ default_settings: null
+ behaviors:
+   Agent:
+     trainer_type: ppo
+     hyperparameters:
+       batch_size: 256
+       buffer_size: 4096
+       learning_rate: 0.0003
+       beta: 0.005
+       epsilon: 0.2
+       lambd: 0.95
+       num_epoch: 3
+       shared_critic: false
+       learning_rate_schedule: linear
+       beta_schedule: linear
+       epsilon_schedule: linear
+     network_settings:
+       normalize: false
+       hidden_units: 256
+       num_layers: 2
+       vis_encode_type: simple
+       memory: null
+       goal_conditioning_type: hyper
+       deterministic: false
+     reward_signals:
+       extrinsic:
+         gamma: 0.995
+         strength: 1.0
+         network_settings:
+           normalize: false
+           hidden_units: 128
+           num_layers: 2
+           vis_encode_type: simple
+           memory: null
+           goal_conditioning_type: hyper
+           deterministic: false
+     init_path: null
+     keep_checkpoints: 5
+     checkpoint_interval: 500000
+     max_steps: 1800000
+     time_horizon: 4096
+     summary_freq: 9000
+     threaded: true
+     self_play: null
+     behavioral_cloning: null
+ env_settings:
+   env_path: c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/dev_environments/Hivex_AerialWildfireSuppression_win
+   env_args: null
+   base_port: 5009
+   num_envs: 1
+   num_areas: 1
+   seed: 5000
+   max_lifetime_restarts: 10
+   restarts_rate_limit_n: 1
+   restarts_rate_limit_period_s: 60
+ engine_settings:
+   width: 84
+   height: 84
+   quality_level: 5
+   time_scale: 20
+   target_frame_rate: -1
+   capture_frame_rate: 60
+   no_graphics: true
+ environment_parameters:
+   difficulty:
+     curriculum:
+     - value:
+         sampler_type: constant
+         sampler_parameters:
+           seed: 5000
+           value: 1
+       name: difficulty
+       completion_criteria: null
+   task:
+     curriculum:
+     - value:
+         sampler_type: constant
+         sampler_parameters:
+           seed: 5001
+           value: 2
+       name: task
+       completion_criteria: null
+ checkpoint_settings:
+   run_id: AerialWildfireSuppression/train/AerialWildfireSuppression_difficulty_1_task_2_run_id_1_train
+   initialize_from: null
+   load_model: false
+   resume: false
+   force: false
+   train_model: false
+   inference: false
+   results_dir: results
+ torch_settings:
+   device: null
+ debug: false
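
Note: the YAML above is the full ML-Agents run configuration for this training. A minimal sketch for inspecting the PPO hyperparameters it records, assuming PyYAML:

```python
# Minimal sketch, assuming PyYAML is installed (pip install pyyaml).
import yaml

with open("configuration.yaml") as f:
    config = yaml.safe_load(f)

# The PPO settings this run was trained with.
ppo = config["behaviors"]["Agent"]["hyperparameters"]
print(ppo["batch_size"], ppo["buffer_size"], ppo["learning_rate"])
# -> 256 4096 0.0003
```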
run_logs/timers.json ADDED
@@ -0,0 +1,527 @@
+ {
+   "name": "root",
+   "gauges": {
+     "Agent.Policy.Entropy.mean": {
+       "value": 1.4084457159042358,
+       "min": 1.4067686796188354,
+       "max": 1.5088719129562378,
+       "count": 200
+     },
+     "Agent.Policy.Entropy.sum": {
+       "value": 12709.814453125,
+       "min": 1415.8470458984375,
+       "max": 25095.46875,
+       "count": 200
+     },
+     "Agent.AerialWildfireSuppression.WaterDrop.mean": {
+       "value": 23.0,
+       "min": 0.6666666666666666,
+       "max": 71.0,
+       "count": 200
+     },
+     "Agent.AerialWildfireSuppression.WaterDrop.sum": {
+       "value": 69.0,
+       "min": 2.0,
+       "max": 213.0,
+       "count": 200
+     },
+     "Agent.AerialWildfireSuppression.WaterPickup.mean": {
+       "value": 22.333333333333332,
+       "min": 0.6666666666666666,
+       "max": 70.33333333333333,
+       "count": 200
+     },
+     "Agent.AerialWildfireSuppression.WaterPickup.sum": {
+       "value": 67.0,
+       "min": 2.0,
+       "max": 211.0,
+       "count": 200
+     },
+     "Agent.AerialWildfireSuppression.PreparingTreesReward.mean": {
+       "value": 3695.0,
+       "min": 88.33333333333333,
+       "max": 8893.333333333334,
+       "count": 200
+     },
+     "Agent.AerialWildfireSuppression.PreparingTreesReward.sum": {
+       "value": 11085.0,
+       "min": 265.0,
+       "max": 34380.0,
+       "count": 200
+     },
+     "Agent.AerialWildfireSuppression.PreparingTrees.mean": {
+       "value": 739.0,
+       "min": 17.666666666666668,
+       "max": 1778.6666666666667,
+       "count": 200
+     },
+     "Agent.AerialWildfireSuppression.PreparingTrees.sum": {
+       "value": 2217.0,
+       "min": 53.0,
+       "max": 6876.0,
+       "count": 200
+     },
+     "Agent.AerialWildfireSuppression.ExtinguishingTreesReward.mean": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 725.0,
+       "count": 200
+     },
+     "Agent.AerialWildfireSuppression.ExtinguishingTreesReward.sum": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 4510.0,
+       "count": 200
+     },
+     "Agent.AerialWildfireSuppression.ExtinguishingTrees.mean": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 145.0,
+       "count": 200
+     },
+     "Agent.AerialWildfireSuppression.ExtinguishingTrees.sum": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 902.0,
+       "count": 200
+     },
+     "Agent.AerialWildfireSuppression.FireOut.mean": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 1.0,
+       "count": 200
+     },
+     "Agent.AerialWildfireSuppression.FireOut.sum": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 6.0,
+       "count": 200
+     },
+     "Agent.AerialWildfireSuppression.FiretooClosetoCity.mean": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 1.0,
+       "count": 200
+     },
+     "Agent.AerialWildfireSuppression.FiretooClosetoCity.sum": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 6.0,
+       "count": 200
+     },
+     "Agent.AerialWildfireSuppression.CrashCount.mean": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 1.0,
+       "count": 200
+     },
+     "Agent.AerialWildfireSuppression.CrashCount.sum": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 39.0,
+       "count": 200
+     },
+     "Agent.Environment.LessonNumber.difficulty.mean": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 0.0,
+       "count": 200
+     },
+     "Agent.Environment.LessonNumber.difficulty.sum": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 0.0,
+       "count": 200
+     },
+     "Agent.Environment.LessonNumber.task.mean": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 0.0,
+       "count": 200
+     },
+     "Agent.Environment.LessonNumber.task.sum": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 0.0,
+       "count": 200
+     },
+     "Agent.Environment.EpisodeLength.mean": {
+       "value": 2999.0,
+       "min": 199.0,
+       "max": 2999.0,
+       "count": 200
+     },
+     "Agent.Environment.EpisodeLength.sum": {
+       "value": 8997.0,
+       "min": 597.0,
+       "max": 17550.0,
+       "count": 200
+     },
+     "Agent.Step.mean": {
+       "value": 1799754.0,
+       "min": 8730.0,
+       "max": 1799754.0,
+       "count": 200
+     },
+     "Agent.Step.sum": {
+       "value": 1799754.0,
+       "min": 8730.0,
+       "max": 1799754.0,
+       "count": 200
+     },
+     "Agent.Policy.ExtrinsicValueEstimate.mean": {
+       "value": 231.97256469726562,
+       "min": 3.9347245693206787,
+       "max": 375.9845275878906,
+       "count": 200
+     },
+     "Agent.Policy.ExtrinsicValueEstimate.sum": {
+       "value": 1159.86279296875,
+       "min": 141.65008544921875,
+       "max": 3966.137939453125,
+       "count": 200
+     },
+     "Agent.Environment.CumulativeReward.mean": {
+       "value": 1801.0,
+       "min": 256.0,
+       "max": 7543.0,
+       "count": 200
+     },
+     "Agent.Environment.CumulativeReward.sum": {
+       "value": 9005.0,
+       "min": 914.0,
+       "max": 24796.0,
+       "count": 200
+     },
+     "Agent.Policy.ExtrinsicReward.mean": {
+       "value": 1801.0,
+       "min": 256.0,
+       "max": 7543.0,
+       "count": 200
+     },
+     "Agent.Policy.ExtrinsicReward.sum": {
+       "value": 9005.0,
+       "min": 914.0,
+       "max": 24796.0,
+       "count": 200
+     },
+     "Agent.Losses.PolicyLoss.mean": {
+       "value": 0.04910632317885757,
+       "min": 0.039386254708296146,
+       "max": 0.06071123640673856,
+       "count": 188
+     },
+     "Agent.Losses.PolicyLoss.sum": {
+       "value": 0.04910632317885757,
+       "min": 0.039386254708296146,
+       "max": 0.11390054475752606,
+       "count": 188
+     },
+     "Agent.Losses.ValueLoss.mean": {
+       "value": 8828.392810872396,
+       "min": 436.81949511869453,
+       "max": 24311.892178622158,
+       "count": 188
+     },
+     "Agent.Losses.ValueLoss.sum": {
+       "value": 8828.392810872396,
+       "min": 436.81949511869453,
+       "max": 38126.060936143665,
+       "count": 188
+     },
+     "Agent.Policy.LearningRate.mean": {
+       "value": 1.6990994336666724e-06,
+       "min": 1.6990994336666724e-06,
+       "max": 0.00029916450027850003,
+       "count": 188
+     },
+     "Agent.Policy.LearningRate.sum": {
+       "value": 1.6990994336666724e-06,
+       "min": 1.6990994336666724e-06,
+       "max": 0.000596064001312,
+       "count": 188
+     },
+     "Agent.Policy.Epsilon.mean": {
+       "value": 0.10056633333333331,
+       "min": 0.10056633333333331,
+       "max": 0.19972150000000002,
+       "count": 188
+     },
+     "Agent.Policy.Epsilon.sum": {
+       "value": 0.10056633333333331,
+       "min": 0.10056633333333331,
+       "max": 0.39868800000000004,
+       "count": 188
+     },
+     "Agent.Policy.Beta.mean": {
+       "value": 3.826003333333344e-05,
+       "min": 3.826003333333344e-05,
+       "max": 0.00498610285,
+       "count": 188
+     },
+     "Agent.Policy.Beta.sum": {
+       "value": 3.826003333333344e-05,
+       "min": 3.826003333333344e-05,
+       "max": 0.009934531199999999,
+       "count": 188
+     },
+     "Agent.IsTraining.mean": {
+       "value": 1.0,
+       "min": 1.0,
+       "max": 1.0,
+       "count": 200
+     },
+     "Agent.IsTraining.sum": {
+       "value": 1.0,
+       "min": 1.0,
+       "max": 1.0,
+       "count": 200
+     }
+   },
+   "metadata": {
+     "timer_format_version": "0.1.0",
+     "start_time_seconds": "1718245818",
+     "python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
+     "command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/AerialWildfireSuppression_difficulty_1_task_2_run_id_1_train.yaml --run-id=AerialWildfireSuppression/train/AerialWildfireSuppression_difficulty_1_task_2_run_id_1_train --base-port 5009",
+     "mlagents_version": "0.30.0",
+     "mlagents_envs_version": "0.30.0",
+     "communication_protocol_version": "1.5.0",
+     "pytorch_version": "1.7.1+cu110",
+     "numpy_version": "1.21.0",
+     "end_time_seconds": "1718249870"
+   },
+   "total": 4052.5219454,
+   "count": 1,
+   "self": 0.23108759999968242,
+   "children": {
+     "run_training.setup": {
+       "total": 0.04894759999999998,
+       "count": 1,
+       "self": 0.04894759999999998
+     },
+     "TrainerController.start_learning": {
+       "total": 4052.2419102000003,
+       "count": 1,
+       "self": 8.500973799985331,
+       "children": {
+         "TrainerController._reset_env": {
+           "total": 1.9733737000000002,
+           "count": 1,
+           "self": 1.9733737000000002
+         },
+         "TrainerController.advance": {
+           "total": 4041.6881784000143,
+           "count": 600927,
+           "self": 7.663409100039644,
+           "children": {
+             "env_step": {
+               "total": 4034.0247692999747,
+               "count": 600927,
+               "self": 2126.388593300021,
+               "children": {
+                 "SubprocessEnvManager._take_step": {
+                   "total": 1901.4516493999565,
+                   "count": 600927,
+                   "self": 16.025169300095058,
+                   "children": {
+                     "TorchPolicy.evaluate": {
+                       "total": 1885.4264800998615,
+                       "count": 600927,
+                       "self": 1885.4264800998615
+                     }
+                   }
+                 },
+                 "workers": {
+                   "total": 6.184526599997399,
+                   "count": 600927,
+                   "self": 0.0,
+                   "children": {
+                     "worker_root": {
+                       "total": 4040.417861699945,
+                       "count": 600927,
+                       "is_parallel": true,
+                       "self": 2322.3952812999632,
+                       "children": {
+                         "steps_from_proto": {
+                           "total": 0.0059123000000000925,
+                           "count": 1,
+                           "is_parallel": true,
+                           "self": 0.00010810000000027742,
+                           "children": {
+                             "_process_maybe_compressed_observation": {
+                               "total": 0.005717199999999867,
+                               "count": 2,
+                               "is_parallel": true,
+                               "self": 4.6599999999896724e-05,
+                               "children": {
+                                 "_observation_to_np_array": {
+                                   "total": 0.00567059999999997,
+                                   "count": 3,
+                                   "is_parallel": true,
+                                   "self": 2.959999999996299e-05,
+                                   "children": {
+                                     "process_pixels": {
+                                       "total": 0.005641000000000007,
+                                       "count": 3,
+                                       "is_parallel": true,
+                                       "self": 0.00018229999999985758,
+                                       "children": {
+                                         "image_decompress": {
+                                           "total": 0.0054587000000001495,
+                                           "count": 3,
+                                           "is_parallel": true,
+                                           "self": 0.0054587000000001495
+                                         }
+                                       }
+                                     }
+                                   }
+                                 }
+                               }
+                             },
+                             "_process_rank_one_or_two_observation": {
+                               "total": 8.699999999994823e-05,
+                               "count": 2,
+                               "is_parallel": true,
+                               "self": 8.699999999994823e-05
+                             }
+                           }
+                         },
+                         "UnityEnvironment.step": {
+                           "total": 1718.0166680999816,
+                           "count": 600927,
+                           "is_parallel": true,
+                           "self": 30.266606800067393,
+                           "children": {
+                             "UnityEnvironment._generate_step_input": {
+                               "total": 33.83795599998073,
+                               "count": 600927,
+                               "is_parallel": true,
+                               "self": 33.83795599998073
+                             },
+                             "communicator.exchange": {
+                               "total": 1415.6147716999455,
+                               "count": 600927,
+                               "is_parallel": true,
+                               "self": 1415.6147716999455
+                             },
+                             "steps_from_proto": {
+                               "total": 238.29733359998806,
+                               "count": 600927,
+                               "is_parallel": true,
+                               "self": 49.33056579975005,
+                               "children": {
+                                 "_process_maybe_compressed_observation": {
+                                   "total": 164.8242394001996,
+                                   "count": 1201854,
+                                   "is_parallel": true,
+                                   "self": 15.283221000176553,
+                                   "children": {
+                                     "_observation_to_np_array": {
+                                       "total": 149.54101840002303,
+                                       "count": 1804095,
+                                       "is_parallel": true,
+                                       "self": 13.311657700172276,
+                                       "children": {
+                                         "process_pixels": {
+                                           "total": 136.22936069985076,
+                                           "count": 1804095,
+                                           "is_parallel": true,
+                                           "self": 49.510362599892815,
+                                           "children": {
+                                             "image_decompress": {
+                                               "total": 86.71899809995794,
+                                               "count": 1804095,
+                                               "is_parallel": true,
+                                               "self": 86.71899809995794
+                                             }
+                                           }
+                                         }
+                                       }
+                                     }
+                                   }
+                                 },
+                                 "_process_rank_one_or_two_observation": {
+                                   "total": 24.142528400038415,
+                                   "count": 1201854,
+                                   "is_parallel": true,
+                                   "self": 24.142528400038415
+                                 }
+                               }
+                             }
+                           }
+                         }
+                       }
+                     }
+                   }
+                 }
+               }
+             }
+           }
+         },
+         "trainer_threads": {
+           "total": 2.66000001829525e-05,
+           "count": 1,
+           "self": 2.66000001829525e-05,
+           "children": {
+             "thread_root": {
+               "total": 0.0,
+               "count": 0,
+               "is_parallel": true,
+               "self": 0.0,
+               "children": {
+                 "trainer_advance": {
+                   "total": 4048.2443355000646,
+                   "count": 208273,
+                   "is_parallel": true,
+                   "self": 8.58529970007794,
+                   "children": {
+                     "process_trajectory": {
+                       "total": 3340.2826587999853,
+                       "count": 208273,
+                       "is_parallel": true,
+                       "self": 3339.972862099985,
+                       "children": {
+                         "RLTrainer._checkpoint": {
+                           "total": 0.3097967000001063,
+                           "count": 3,
+                           "is_parallel": true,
+                           "self": 0.3097967000001063
+                         }
+                       }
+                     },
+                     "_update_policy": {
+                       "total": 699.3763770000014,
+                       "count": 232,
+                       "is_parallel": true,
+                       "self": 145.26156530000458,
+                       "children": {
+                         "TorchPPOOptimizer.update": {
+                           "total": 554.1148116999968,
+                           "count": 20700,
+                           "is_parallel": true,
+                           "self": 554.1148116999968
+                         }
+                       }
+                     }
+                   }
+                 }
+               }
+             }
+           }
+         },
+         "TrainerController._save_models": {
+           "total": 0.07935770000040065,
+           "count": 1,
+           "self": 0.015490500000396423,
+           "children": {
+             "RLTrainer._checkpoint": {
+               "total": 0.06386720000000423,
+               "count": 1,
+               "self": 0.06386720000000423
+             }
+           }
+         }
+       }
+     }
+   }
+ }
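
Note: `timers.json` is a nested profile of the training run; every node records wall-clock `total` seconds, a call `count`, and `self` time, with sub-timers under `children`. A minimal sketch for flattening that tree to see where the 4052.5 s went (for example, `communicator.exchange` accounts for about 1415.6 s):

```python
# Minimal sketch: walk the mlagents timer tree and print each node's total time.
import json

with open("run_logs/timers.json") as f:
    root = json.load(f)

def walk(node, name="root", depth=0):
    # Each timer node stores wall-clock seconds in "total" and nests
    # sub-timers under "children".
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.1f}s")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(root)
```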
run_logs/training_status.json ADDED
@@ -0,0 +1,62 @@
+ {
+   "difficulty": {
+     "lesson_num": 0
+   },
+   "task": {
+     "lesson_num": 0
+   },
+   "Agent": {
+     "checkpoints": [
+       {
+         "steps": 497304,
+         "file_path": "results\\AerialWildfireSuppression/train/AerialWildfireSuppression_difficulty_1_task_2_run_id_1_train\\Agent\\Agent-497304.onnx",
+         "reward": -150.0,
+         "creation_time": 1718246934.2450087,
+         "auxillary_file_paths": [
+           "results\\AerialWildfireSuppression/train/AerialWildfireSuppression_difficulty_1_task_2_run_id_1_train\\Agent\\Agent-497304.pt"
+         ]
+       },
+       {
+         "steps": 998372,
+         "file_path": "results\\AerialWildfireSuppression/train/AerialWildfireSuppression_difficulty_1_task_2_run_id_1_train\\Agent\\Agent-998372.onnx",
+         "reward": 1758.4285714285713,
+         "creation_time": 1718248056.5177968,
+         "auxillary_file_paths": [
+           "results\\AerialWildfireSuppression/train/AerialWildfireSuppression_difficulty_1_task_2_run_id_1_train\\Agent\\Agent-998372.pt"
+         ]
+       },
+       {
+         "steps": 1499754,
+         "file_path": "results\\AerialWildfireSuppression/train/AerialWildfireSuppression_difficulty_1_task_2_run_id_1_train\\Agent\\Agent-1499754.onnx",
+         "reward": null,
+         "creation_time": 1718249207.5044103,
+         "auxillary_file_paths": [
+           "results\\AerialWildfireSuppression/train/AerialWildfireSuppression_difficulty_1_task_2_run_id_1_train\\Agent\\Agent-1499754.pt"
+         ]
+       },
+       {
+         "steps": 1802754,
+         "file_path": "results\\AerialWildfireSuppression/train/AerialWildfireSuppression_difficulty_1_task_2_run_id_1_train\\Agent\\Agent-1802754.onnx",
+         "reward": 2274.6666666666665,
+         "creation_time": 1718249870.6935065,
+         "auxillary_file_paths": [
+           "results\\AerialWildfireSuppression/train/AerialWildfireSuppression_difficulty_1_task_2_run_id_1_train\\Agent\\Agent-1802754.pt"
+         ]
+       }
+     ],
+     "final_checkpoint": {
+       "steps": 1802754,
+       "file_path": "results\\AerialWildfireSuppression/train/AerialWildfireSuppression_difficulty_1_task_2_run_id_1_train\\Agent.onnx",
+       "reward": 2274.6666666666665,
+       "creation_time": 1718249870.6935065,
+       "auxillary_file_paths": [
+         "results\\AerialWildfireSuppression/train/AerialWildfireSuppression_difficulty_1_task_2_run_id_1_train\\Agent\\Agent-1802754.pt"
+       ]
+     }
+   },
+   "metadata": {
+     "stats_format_version": "0.3.0",
+     "mlagents_version": "0.30.0",
+     "torch_version": "1.7.1+cu110"
+   }
+ }
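
Note: `training_status.json` lists the checkpoints written during the run; the final 1802754-step policy is the one exported as `Agent.onnx`. A minimal sketch for selecting the best-scoring checkpoint; one entry records `"reward": null` and must be skipped:

```python
# Minimal sketch: pick the highest-reward checkpoint from the status file.
import json

with open("run_logs/training_status.json") as f:
    status = json.load(f)

# Skip checkpoints whose reward was not recorded (null in the JSON).
scored = [c for c in status["Agent"]["checkpoints"] if c["reward"] is not None]
best = max(scored, key=lambda c: c["reward"])
print(best["steps"], best["reward"])
# -> 1802754 2274.6666666666665
```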