philippds committed on
Commit
77ac9c1
1 Parent(s): 7e2673c

Upload 13 files

Agent.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:820f7603db69271e707aa41907254b3e1f9003fb5574e361478efb6126c27396
+ size 562656
Agent/Agent-1200303.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:820f7603db69271e707aa41907254b3e1f9003fb5574e361478efb6126c27396
+ size 562656
Agent/Agent-1200303.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:482942a735a537f4653a6a22014e24e8d2271c6cbf887718ed32d3a1c6a4b301
+ size 4530031
Agent/Agent-499976.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c00f6f97f67252f5e53850fd3162892c8895566d1403c81404fecdf33378d0a
+ size 562656
Agent/Agent-499976.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b493cf202f1466537a1f2da437fb6c6271748424ba7f131da5baa119989d87a
+ size 4530031
Agent/Agent-999798.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0eeda0a6d7a3caf11b54bab06f9ff67f67417f322ec63c401c39a585758c21dd
+ size 562656
Agent/Agent-999798.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17887d35ac9613c233734b68643452095edd008dc8abac8a77fd4ceda4bb1e87
+ size 4530031
Agent/checkpoint.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:482942a735a537f4653a6a22014e24e8d2271c6cbf887718ed32d3a1c6a4b301
+ size 4530031
Agent/events.out.tfevents.1717239443.RICHARD.26284.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c06be5b609c3269bd8a91c4e492540f64a10e5913439d671a6c68c1fc88ded0
+ size 2358985
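
All of the binary artifacts above (the ONNX policies, PyTorch checkpoints, and the TensorBoard event file) are tracked with Git LFS, so the repository itself only stores pointer files carrying the object's `oid` (a SHA-256 digest) and `size` in bytes. Normally `git lfs pull` resolves these pointers automatically; the sketch below is a small, hypothetical helper (not part of this repository) for parsing a pointer and verifying a separately downloaded artifact against it.

```python
import hashlib
from pathlib import Path


def parse_lfs_pointer(pointer_text: str) -> dict:
    """Parse the key/value lines of a Git LFS pointer file."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    return {
        "oid": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }


def verify_against_pointer(local_file: Path, pointer_text: str) -> bool:
    """Check a downloaded artifact against the oid/size recorded in its pointer."""
    expected = parse_lfs_pointer(pointer_text)
    data = local_file.read_bytes()
    return (
        len(data) == expected["size"]
        and hashlib.sha256(data).hexdigest() == expected["oid"]
    )
```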
README.md ADDED
@@ -0,0 +1,59 @@
+ ---
+ library_name: hivex
+ original_train_name: DroneBasedReforestation_difficulty_4_task_0_run_id_1_train
+ tags:
+ - hivex
+ - hivex-drone-based-reforestation
+ - reinforcement-learning
+ - multi-agent-reinforcement-learning
+ model-index:
+ - name: hivex-DBR-PPO-baseline-task-0-difficulty-4
+   results:
+   - task:
+       type: main-task
+       name: main_task
+       task-id: 0
+       difficulty-id: 4
+     dataset:
+       name: hivex-drone-based-reforestation
+       type: hivex-drone-based-reforestation
+     metrics:
+     - type: cumulative_distance_reward
+       value: 2.2025150191783904 +/- 0.8048838225723474
+       name: Cumulative Distance Reward
+       verified: true
+     - type: cumulative_distance_until_tree_drop
+       value: 70.3463491821289 +/- 13.872446958274292
+       name: Cumulative Distance Until Tree Drop
+       verified: true
+     - type: cumulative_distance_to_existing_trees
+       value: 63.00524223327637 +/- 14.558269040918253
+       name: Cumulative Distance to Existing Trees
+       verified: true
+     - type: cumulative_normalized_distance_until_tree_drop
+       value: 0.2202515023946762 +/- 0.08048838503417874
+       name: Cumulative Normalized Distance Until Tree Drop
+       verified: true
+     - type: cumulative_tree_drop_reward
+       value: 5.9483204627037045 +/- 2.3428193553001173
+       name: Cumulative Tree Drop Reward
+       verified: true
+     - type: out_of_energy_count
+       value: 0.9133015894889831 +/- 0.07074894155629612
+       name: Out of Energy Count
+       verified: true
+     - type: recharge_energy_count
+       value: 11.649015922546386 +/- 1.7220237341546332
+       name: Recharge Energy Count
+       verified: true
+     - type: tree_drop_count
+       value: 1.0417143070697785 +/- 0.08413648907043028
+       name: Tree Drop Count
+       verified: true
+     - type: cumulative_reward
+       value: 9.298489372730256 +/- 3.9538719454855293
+       name: Cumulative Reward
+       verified: true
+ ---
+
+ This model serves as the baseline for the **Drone-Based Reforestation** environment, trained and tested on task <code>0</code> with difficulty <code>4</code> using the Proximal Policy Optimization (PPO) algorithm.<br><br>Environment: **Drone-Based Reforestation**<br>Task: <code>0</code><br>Difficulty: <code>4</code><br>Algorithm: <code>PPO</code><br>Episode Length: <code>2000</code><br>Training <code>max_steps</code>: <code>1200000</code><br>Testing <code>max_steps</code>: <code>300000</code><br><br>Train & Test [Scripts](https://github.com/hivex-research/hivex)<br>Download the [Environment](https://github.com/hivex-research/hivex-environments)
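
A minimal inference smoke test for the exported `Agent.onnx` policy might look like the sketch below. It assumes the file is fetched from this repository with `huggingface_hub` (the `repo_id` shown is a placeholder, not confirmed by this commit) and executed with `onnxruntime`; the real observation names, shapes, and dtypes should be read from `session.get_inputs()` rather than the zero-filled float dummies used here.

```python
import numpy as np
import onnxruntime as ort
from huggingface_hub import hf_hub_download

# Placeholder repo_id: substitute the actual Hub repository this commit belongs to.
model_path = hf_hub_download(
    repo_id="hivex-research/hivex-DBR-PPO-baseline-task-0-difficulty-4",
    filename="Agent.onnx",
)

session = ort.InferenceSession(model_path)

# Build zero-filled dummy inputs; dynamic dimensions are replaced with batch size 1.
# Assumption: all model inputs are float tensors, as is typical for ML-Agents exports.
feeds = {}
for inp in session.get_inputs():
    shape = [d if isinstance(d, int) else 1 for d in inp.shape]
    feeds[inp.name] = np.zeros(shape, dtype=np.float32)

outputs = session.run(None, feeds)
print([o.shape for o in outputs])
```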
configuration.yaml ADDED
@@ -0,0 +1,107 @@
+ default_settings: null
+ behaviors:
+   Agent:
+     trainer_type: ppo
+     hyperparameters:
+       batch_size: 1024
+       buffer_size: 8192
+       learning_rate: 0.0003
+       beta: 0.005
+       epsilon: 0.2
+       lambd: 0.95
+       num_epoch: 3
+       shared_critic: false
+       learning_rate_schedule: linear
+       beta_schedule: linear
+       epsilon_schedule: linear
+     network_settings:
+       normalize: false
+       hidden_units: 128
+       num_layers: 2
+       vis_encode_type: resnet
+       memory: null
+       goal_conditioning_type: hyper
+       deterministic: false
+     reward_signals:
+       curiosity:
+         gamma: 0.99
+         strength: 0.1
+         network_settings:
+           normalize: false
+           hidden_units: 128
+           num_layers: 2
+           vis_encode_type: resnet
+           memory: null
+           goal_conditioning_type: hyper
+           deterministic: false
+         learning_rate: 0.0003
+         encoding_size: 256
+       extrinsic:
+         gamma: 0.99
+         strength: 0.9
+         network_settings:
+           normalize: false
+           hidden_units: 128
+           num_layers: 2
+           vis_encode_type: resnet
+           memory: null
+           goal_conditioning_type: hyper
+           deterministic: false
+     init_path: null
+     keep_checkpoints: 5
+     checkpoint_interval: 500000
+     max_steps: 1200000
+     time_horizon: 8192
+     summary_freq: 6000
+     threaded: true
+     self_play: null
+     behavioral_cloning: null
+ env_settings:
+   env_path: c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/dev_environments/Hivex_DroneBasedReforestation_win
+   env_args: null
+   base_port: 5007
+   num_envs: 1
+   num_areas: 1
+   seed: 5000
+   max_lifetime_restarts: 10
+   restarts_rate_limit_n: 1
+   restarts_rate_limit_period_s: 60
+ engine_settings:
+   width: 84
+   height: 84
+   quality_level: 5
+   time_scale: 20
+   target_frame_rate: -1
+   capture_frame_rate: 60
+   no_graphics: true
+ environment_parameters:
+   difficulty:
+     curriculum:
+     - value:
+         sampler_type: constant
+         sampler_parameters:
+           seed: 5000
+           value: 4
+       name: difficulty
+       completion_criteria: null
+   task:
+     curriculum:
+     - value:
+         sampler_type: constant
+         sampler_parameters:
+           seed: 5001
+           value: 0
+       name: task
+       completion_criteria: null
+ checkpoint_settings:
+   run_id: DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_0_run_id_1_train
+   initialize_from: null
+   load_model: false
+   resume: false
+   force: false
+   train_model: false
+   inference: false
+   results_dir: results
+ torch_settings:
+   device: null
+ debug: false
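
`configuration.yaml` is a standard ML-Agents (release 0.30.0) trainer configuration: PPO with an extrinsic reward signal (strength 0.9) plus a curiosity signal (strength 0.1), a ResNet visual encoder, 1,200,000 training steps, and constant curriculum samplers that pin `difficulty` to 4 and `task` to 0. A minimal sketch, assuming `mlagents-learn` 0.30.0 and PyYAML are installed and that the environment paths are adjusted to your machine, which loads the file and re-issues the same training command recorded in `run_logs/timers.json`:

```python
import subprocess
import yaml  # pip install pyyaml

# Inspect the PPO hyperparameters for the "Agent" behavior.
with open("configuration.yaml") as f:
    config = yaml.safe_load(f)
ppo = config["behaviors"]["Agent"]["hyperparameters"]
print(ppo["batch_size"], ppo["buffer_size"], ppo["learning_rate"])

# Re-run training with the arguments logged for this run
# (run-id and base port taken verbatim from run_logs/timers.json).
subprocess.run(
    [
        "mlagents-learn",
        "configuration.yaml",
        "--run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_0_run_id_1_train",
        "--base-port",
        "5007",
    ],
    check=True,
)
```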
run_logs/timers.json ADDED
@@ -0,0 +1,575 @@
+ {
+   "name": "root",
+   "gauges": {
+     "Agent.Policy.Entropy.mean": {
+       "value": 1.4380470514297485,
+       "min": 1.4210506677627563,
+       "max": 1.440159797668457,
+       "count": 200
+     },
+     "Agent.Policy.Entropy.sum": {
+       "value": 8511.80078125,
+       "min": 7452.4091796875,
+       "max": 10234.216796875,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.TreeDropCount.mean": {
+       "value": 1.0666666666666667,
+       "min": 0.6,
+       "max": 1.3333333333333333,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.TreeDropCount.sum": {
+       "value": 16.0,
+       "min": 9.0,
+       "max": 24.0,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
+       "value": 11.4,
+       "min": 9.055555555555555,
+       "max": 56.72222222222222,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
+       "value": 171.0,
+       "min": 140.0,
+       "max": 1021.0,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.SaveLocationCount.mean": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 0.0,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.SaveLocationCount.sum": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 0.0,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
+       "value": 0.9333333333333333,
+       "min": 0.4666666666666667,
+       "max": 1.0,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
+       "value": 14.0,
+       "min": 7.0,
+       "max": 18.0,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
+       "value": 55.36326599121094,
+       "min": 15.930303573608398,
+       "max": 96.99653180440266,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
+       "value": 830.4489898681641,
+       "min": 238.95455360412598,
+       "max": 1546.7922191619873,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
+       "value": 9.80856335957845,
+       "min": 0.7838140626748403,
+       "max": 10.676346683502198,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
+       "value": 147.12845039367676,
+       "min": 11.757210940122604,
+       "max": 160.14520025253296,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
+       "value": 1.9161027113596598,
+       "min": 0.19702432552973428,
+       "max": 3.0707154936260648,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
+       "value": 28.741540670394897,
+       "min": 2.9553648829460144,
+       "max": 55.272878885269165,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
+       "value": 0.19161026726166408,
+       "min": 0.019702432056268055,
+       "max": 0.3070715483691957,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
+       "value": 2.874154008924961,
+       "min": 0.29553648084402084,
+       "max": 5.527287870645523,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
+       "value": 46.57113973299662,
+       "min": 28.612841029961903,
+       "max": 110.09242121378581,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
+       "value": 698.5670959949493,
+       "min": 459.6895525455475,
+       "max": 1726.5684146881104,
+       "count": 200
+     },
+     "Agent.Environment.LessonNumber.difficulty.mean": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 0.0,
+       "count": 200
+     },
+     "Agent.Environment.LessonNumber.difficulty.sum": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 0.0,
+       "count": 200
+     },
+     "Agent.Environment.LessonNumber.task.mean": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 0.0,
+       "count": 200
+     },
+     "Agent.Environment.LessonNumber.task.sum": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 0.0,
+       "count": 200
+     },
+     "Agent.Environment.EpisodeLength.mean": {
+       "value": 393.2,
+       "min": 337.8333333333333,
+       "max": 399.0,
+       "count": 200
+     },
+     "Agent.Environment.EpisodeLength.sum": {
+       "value": 5898.0,
+       "min": 5160.0,
+       "max": 7173.0,
+       "count": 200
+     },
+     "Agent.Step.mean": {
+       "value": 1199913.0,
+       "min": 5987.0,
+       "max": 1199913.0,
+       "count": 200
+     },
+     "Agent.Step.sum": {
+       "value": 1199913.0,
+       "min": 5987.0,
+       "max": 1199913.0,
+       "count": 200
+     },
+     "Agent.Policy.CuriosityValueEstimate.mean": {
+       "value": 0.4023384749889374,
+       "min": 0.02915876917541027,
+       "max": 0.9292956590652466,
+       "count": 200
+     },
+     "Agent.Policy.CuriosityValueEstimate.sum": {
+       "value": 6.035077095031738,
+       "min": 0.4373815357685089,
+       "max": 15.34562873840332,
+       "count": 200
+     },
+     "Agent.Policy.ExtrinsicValueEstimate.mean": {
+       "value": 0.6397857069969177,
+       "min": 0.06568823009729385,
+       "max": 1.130825161933899,
+       "count": 200
+     },
+     "Agent.Policy.ExtrinsicValueEstimate.sum": {
+       "value": 9.596785545349121,
+       "min": 1.1166999340057373,
+       "max": 18.093202590942383,
+       "count": 200
+     },
+     "Agent.Environment.CumulativeReward.mean": {
+       "value": 12.295849800109863,
+       "min": -0.4003447631994883,
+       "max": 16.644680166244505,
+       "count": 200
+     },
+     "Agent.Environment.CumulativeReward.sum": {
+       "value": 184.43774700164795,
+       "min": -6.005171447992325,
+       "max": 253.88925862312317,
+       "count": 200
+     },
+     "Agent.Policy.CuriosityReward.mean": {
+       "value": 1.5010416428248088,
+       "min": 0.0,
+       "max": 13.214636166890463,
+       "count": 200
+     },
+     "Agent.Policy.CuriosityReward.sum": {
+       "value": 22.51562464237213,
+       "min": 0.0,
+       "max": 198.21954250335693,
+       "count": 200
+     },
+     "Agent.Policy.ExtrinsicReward.mean": {
+       "value": 11.066264327367147,
+       "min": -0.3603106955687205,
+       "max": 14.980211734771729,
+       "count": 200
+     },
+     "Agent.Policy.ExtrinsicReward.sum": {
+       "value": 165.9939649105072,
+       "min": -5.4046604335308075,
+       "max": 228.50032329559326,
+       "count": 200
+     },
+     "Agent.IsTraining.mean": {
+       "value": 1.0,
+       "min": 1.0,
+       "max": 1.0,
+       "count": 200
+     },
+     "Agent.IsTraining.sum": {
+       "value": 1.0,
+       "min": 1.0,
+       "max": 1.0,
+       "count": 200
+     },
+     "Agent.Losses.PolicyLoss.mean": {
+       "value": 0.020519517420325428,
+       "min": 0.013041299698805367,
+       "max": 0.033138952873371263,
+       "count": 137
+     },
+     "Agent.Losses.PolicyLoss.sum": {
+       "value": 0.020519517420325428,
+       "min": 0.013041299698805367,
+       "max": 0.033138952873371263,
+       "count": 137
+     },
+     "Agent.Losses.ValueLoss.mean": {
+       "value": 1.3046319286028545,
+       "min": 0.08780664003764589,
+       "max": 1.3675937431829948,
+       "count": 137
+     },
+     "Agent.Losses.ValueLoss.sum": {
+       "value": 1.3046319286028545,
+       "min": 0.08780664003764589,
+       "max": 1.3675937431829948,
+       "count": 137
+     },
+     "Agent.Policy.LearningRate.mean": {
+       "value": 5.108498297500153e-07,
+       "min": 5.108498297500153e-07,
+       "max": 0.00029780325073225,
+       "count": 137
+     },
+     "Agent.Policy.LearningRate.sum": {
+       "value": 5.108498297500153e-07,
+       "min": 5.108498297500153e-07,
+       "max": 0.00029780325073225,
+       "count": 137
+     },
+     "Agent.Policy.Epsilon.mean": {
+       "value": 0.10017025000000002,
+       "min": 0.10017025000000002,
+       "max": 0.19926775000000005,
+       "count": 137
+     },
+     "Agent.Policy.Epsilon.sum": {
+       "value": 0.10017025000000002,
+       "min": 0.10017025000000002,
+       "max": 0.19926775000000005,
+       "count": 137
+     },
+     "Agent.Policy.Beta.mean": {
+       "value": 1.8495475000000255e-05,
+       "min": 1.8495475000000255e-05,
+       "max": 0.004963460725,
+       "count": 137
+     },
+     "Agent.Policy.Beta.sum": {
+       "value": 1.8495475000000255e-05,
+       "min": 1.8495475000000255e-05,
+       "max": 0.004963460725,
+       "count": 137
+     },
+     "Agent.Losses.CuriosityForwardLoss.mean": {
+       "value": 0.03610416315495968,
+       "min": 0.030629316137896642,
+       "max": 0.6027635087569555,
+       "count": 137
+     },
+     "Agent.Losses.CuriosityForwardLoss.sum": {
+       "value": 0.03610416315495968,
+       "min": 0.030629316137896642,
+       "max": 0.6027635087569555,
+       "count": 137
+     },
+     "Agent.Losses.CuriosityInverseLoss.mean": {
+       "value": 1.896045779188474,
+       "min": 1.7552975515524547,
+       "max": 3.315477500359217,
+       "count": 137
+     },
+     "Agent.Losses.CuriosityInverseLoss.sum": {
+       "value": 1.896045779188474,
+       "min": 1.7552975515524547,
+       "max": 3.315477500359217,
+       "count": 137
+     }
+   },
+   "metadata": {
+     "timer_format_version": "0.1.0",
+     "start_time_seconds": "1717239440",
+     "python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
+     "command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_4_task_0_run_id_1_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_0_run_id_1_train --base-port 5007",
+     "mlagents_version": "0.30.0",
+     "mlagents_envs_version": "0.30.0",
+     "communication_protocol_version": "1.5.0",
+     "pytorch_version": "1.7.1+cu110",
+     "numpy_version": "1.21.0",
+     "end_time_seconds": "1717243110"
+   },
+   "total": 3669.7381731,
+   "count": 1,
+   "self": 0.28814790000024004,
+   "children": {
+     "run_training.setup": {
+       "total": 0.050848799999999916,
+       "count": 1,
+       "self": 0.050848799999999916
+     },
+     "TrainerController.start_learning": {
+       "total": 3669.3991763999998,
+       "count": 1,
+       "self": 5.227523499976087,
+       "children": {
+         "TrainerController._reset_env": {
+           "total": 3.6653909,
+           "count": 1,
+           "self": 3.6653909
+         },
+         "TrainerController.advance": {
+           "total": 3660.3280628000234,
+           "count": 401040,
+           "self": 4.8690916999630645,
+           "children": {
+             "env_step": {
+               "total": 3655.4589711000604,
+               "count": 401040,
+               "self": 1652.7016028998614,
+               "children": {
+                 "SubprocessEnvManager._take_step": {
+                   "total": 1999.2849421001135,
+                   "count": 401040,
+                   "self": 10.719409600169229,
+                   "children": {
+                     "TorchPolicy.evaluate": {
+                       "total": 1988.5655324999443,
+                       "count": 400110,
+                       "self": 1988.5655324999443
+                     }
+                   }
+                 },
+                 "workers": {
+                   "total": 3.472426100085382,
+                   "count": 401040,
+                   "self": 0.0,
+                   "children": {
+                     "worker_root": {
+                       "total": 3658.9897427999776,
+                       "count": 401040,
+                       "is_parallel": true,
+                       "self": 2215.9914199000195,
+                       "children": {
+                         "steps_from_proto": {
+                           "total": 0.006121300000000218,
+                           "count": 1,
+                           "is_parallel": true,
+                           "self": 0.00010129999999985984,
+                           "children": {
+                             "_process_maybe_compressed_observation": {
+                               "total": 0.005975900000000145,
+                               "count": 2,
+                               "is_parallel": true,
+                               "self": 2.9300000000898052e-05,
+                               "children": {
+                                 "_observation_to_np_array": {
+                                   "total": 0.005946599999999247,
+                                   "count": 3,
+                                   "is_parallel": true,
+                                   "self": 2.7499999999403002e-05,
+                                   "children": {
+                                     "process_pixels": {
+                                       "total": 0.005919099999999844,
+                                       "count": 3,
+                                       "is_parallel": true,
+                                       "self": 0.00022789999999961452,
+                                       "children": {
+                                         "image_decompress": {
+                                           "total": 0.005691200000000229,
+                                           "count": 3,
+                                           "is_parallel": true,
+                                           "self": 0.005691200000000229
+                                         }
+                                       }
+                                     }
+                                   }
+                                 }
+                               }
+                             },
+                             "_process_rank_one_or_two_observation": {
+                               "total": 4.410000000021341e-05,
+                               "count": 2,
+                               "is_parallel": true,
+                               "self": 4.410000000021341e-05
+                             }
+                           }
+                         },
+                         "UnityEnvironment.step": {
+                           "total": 1442.9922015999582,
+                           "count": 401040,
+                           "is_parallel": true,
+                           "self": 18.86580499987258,
+                           "children": {
+                             "UnityEnvironment._generate_step_input": {
+                               "total": 20.762399499892027,
+                               "count": 401040,
+                               "is_parallel": true,
+                               "self": 20.762399499892027
+                             },
+                             "communicator.exchange": {
+                               "total": 1255.3583208000516,
+                               "count": 401040,
+                               "is_parallel": true,
+                               "self": 1255.3583208000516
+                             },
+                             "steps_from_proto": {
+                               "total": 148.0056763001418,
+                               "count": 401040,
+                               "is_parallel": true,
+                               "self": 29.713494699777357,
+                               "children": {
+                                 "_process_maybe_compressed_observation": {
+                                   "total": 105.30966810025639,
+                                   "count": 802080,
+                                   "is_parallel": true,
+                                   "self": 8.615924700225861,
+                                   "children": {
+                                     "_observation_to_np_array": {
+                                       "total": 96.69374340003053,
+                                       "count": 1203444,
+                                       "is_parallel": true,
+                                       "self": 8.426584100211485,
+                                       "children": {
+                                         "process_pixels": {
+                                           "total": 88.26715929981904,
+                                           "count": 1203444,
+                                           "is_parallel": true,
+                                           "self": 41.55240329984778,
+                                           "children": {
+                                             "image_decompress": {
+                                               "total": 46.71475599997126,
+                                               "count": 1203444,
+                                               "is_parallel": true,
+                                               "self": 46.71475599997126
+                                             }
+                                           }
+                                         }
+                                       }
+                                     }
+                                   }
+                                 },
+                                 "_process_rank_one_or_two_observation": {
+                                   "total": 12.982513500108055,
+                                   "count": 802080,
+                                   "is_parallel": true,
+                                   "self": 12.982513500108055
+                                 }
+                               }
+                             }
+                           }
+                         }
+                       }
+                     }
+                   }
+                 }
+               }
+             }
+           }
+         },
+         "trainer_threads": {
+           "total": 2.66000001829525e-05,
+           "count": 1,
+           "self": 2.66000001829525e-05,
+           "children": {
+             "thread_root": {
+               "total": 0.0,
+               "count": 0,
+               "is_parallel": true,
+               "self": 0.0,
+               "children": {
+                 "trainer_advance": {
+                   "total": 3664.2653862999946,
+                   "count": 177916,
+                   "is_parallel": true,
+                   "self": 3.890928399915083,
+                   "children": {
+                     "process_trajectory": {
+                       "total": 2912.16683060008,
+                       "count": 177916,
+                       "is_parallel": true,
+                       "self": 2911.73211080008,
+                       "children": {
+                         "RLTrainer._checkpoint": {
+                           "total": 0.43471980000003896,
+                           "count": 2,
+                           "is_parallel": true,
+                           "self": 0.43471980000003896
+                         }
+                       }
+                     },
+                     "_update_policy": {
+                       "total": 748.2076272999991,
+                       "count": 137,
+                       "is_parallel": true,
+                       "self": 502.2388822000035,
+                       "children": {
+                         "TorchPPOOptimizer.update": {
+                           "total": 245.9687450999956,
+                           "count": 3393,
+                           "is_parallel": true,
+                           "self": 245.9687450999956
+                         }
+                       }
+                     }
+                   }
+                 }
+               }
+             }
+           }
+         },
+         "TrainerController._save_models": {
+           "total": 0.1781725999999253,
+           "count": 1,
+           "self": 0.012462499999855936,
+           "children": {
+             "RLTrainer._checkpoint": {
+               "total": 0.16571010000006936,
+               "count": 1,
+               "self": 0.16571010000006936
+             }
+           }
+         }
+       }
+     }
+   }
+ }
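
`run_logs/timers.json` is the profiler dump ML-Agents writes at the end of a run: a `gauges` block with per-metric value/min/max/count statistics, a `metadata` block, and a tree of timer nodes whose `total` (seconds), `count`, `self`, and `children` fields show where the 3669.7 s of wall-clock time went. A minimal sketch, assuming the file is read from `run_logs/timers.json` relative to the repository root, that walks the tree and lists the most expensive nodes:

```python
import json
from pathlib import Path


def walk_timers(node: dict, name: str = "root", depth: int = 0):
    """Yield (name, depth, total_seconds, call_count) for every timer node."""
    yield name, depth, node.get("total", 0.0), node.get("count", 0)
    for child_name, child in node.get("children", {}).items():
        yield from walk_timers(child, child_name, depth + 1)


timers = json.loads(Path("run_logs/timers.json").read_text())
rows = list(walk_timers(timers))

# Largest contributors to the 3669.7 s total recorded for this run.
for name, depth, total, count in sorted(rows, key=lambda r: r[2], reverse=True)[:8]:
    print(f"{'  ' * depth}{name}: {total:.1f} s over {count} calls")
```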
run_logs/training_status.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "difficulty": {
+     "lesson_num": 0
+   },
+   "task": {
+     "lesson_num": 0
+   },
+   "Agent": {
+     "checkpoints": [
+       {
+         "steps": 499976,
+         "file_path": "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_0_run_id_1_train\\Agent\\Agent-499976.onnx",
+         "reward": 6.806787922978401,
+         "creation_time": 1717241027.6436312,
+         "auxillary_file_paths": [
+           "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_0_run_id_1_train\\Agent\\Agent-499976.pt"
+         ]
+       },
+       {
+         "steps": 999798,
+         "file_path": "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_0_run_id_1_train\\Agent\\Agent-999798.onnx",
+         "reward": 6.595406770706177,
+         "creation_time": 1717242512.000112,
+         "auxillary_file_paths": [
+           "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_0_run_id_1_train\\Agent\\Agent-999798.pt"
+         ]
+       },
+       {
+         "steps": 1200303,
+         "file_path": "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_0_run_id_1_train\\Agent\\Agent-1200303.onnx",
+         "reward": 4.413764258225759,
+         "creation_time": 1717243110.1139023,
+         "auxillary_file_paths": [
+           "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_0_run_id_1_train\\Agent\\Agent-1200303.pt"
+         ]
+       }
+     ],
+     "final_checkpoint": {
+       "steps": 1200303,
+       "file_path": "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_0_run_id_1_train\\Agent.onnx",
+       "reward": 4.413764258225759,
+       "creation_time": 1717243110.1139023,
+       "auxillary_file_paths": [
+         "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_0_run_id_1_train\\Agent\\Agent-1200303.pt"
+       ]
+     }
+   },
+   "metadata": {
+     "stats_format_version": "0.3.0",
+     "mlagents_version": "0.30.0",
+     "torch_version": "1.7.1+cu110"
+   }
+ }
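
`run_logs/training_status.json` records the curriculum lesson numbers and the exported checkpoints together with their mean rewards. Note that the intermediate checkpoint at 499,976 steps has the highest recorded reward (~6.81), while the final export at 1,200,303 steps scores ~4.41. A short sketch, assuming the file is read from `run_logs/training_status.json`, for selecting the best-scoring checkpoint:

```python
import json
from pathlib import Path

status = json.loads(Path("run_logs/training_status.json").read_text())
checkpoints = status["Agent"]["checkpoints"]

# The final export is not necessarily the best-scoring one: here the
# 499,976-step checkpoint (reward ~6.81) beats the final 1,200,303-step
# export (reward ~4.41).
best = max(checkpoints, key=lambda c: c["reward"])
print(best["steps"], best["reward"], best["file_path"])
```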