philippds committed
Commit 07e0862
1 Parent(s): 04c03ec

Upload 16 files
Agent.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a200d136292ad453becb850928497cc9c40226fa938a4fea4213b1e336da0ef
+ size 608107
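These .onnx/.pt entries are Git LFS pointer files: only the hash and size are versioned here, and `git lfs pull` fetches the binaries. Once fetched, the exported policy can be smoke-tested outside Unity; a minimal sketch, assuming `onnxruntime` and `numpy` are installed and that all inputs are float32 (typical for ML-Agents exports), with tensor names and shapes read from the graph rather than hard-coded:

```python
# Minimal smoke test for the exported policy. Tensor names/shapes are
# discovered at runtime, not assumed.
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("Agent.onnx")

# Inspect the graph to see what the policy expects and returns.
for inp in sess.get_inputs():
    print("input: ", inp.name, inp.shape, inp.type)
for out in sess.get_outputs():
    print("output:", out.name, out.shape)

# Zero-filled observations with batch size 1; symbolic/unknown dims -> 1.
# Replace with real observations from the environment for actual inference.
feeds = {
    inp.name: np.zeros(
        [d if isinstance(d, int) else 1 for d in inp.shape],
        dtype=np.float32,
    )
    for inp in sess.get_inputs()
}
print(sess.run(None, feeds))  # forward pass on the dummy observations
```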
Agent/Agent-1499962.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae0028ea612828cef4474b16fa21b0efec5dd413126802b3415d899ed93a8335
+ size 608107
Agent/Agent-1499962.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:49871230204f4763505cde30d9061f3c064301a31a923f6e70faf23855981595
+ size 4847687
Agent/Agent-1999995.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:affe1569154fc7730c345fa2c98c94f560ccfa188e4d0077f01b79c16585f346
+ size 608107
Agent/Agent-1999995.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5be35c5ef56304e2458609d1ac25c8c83b1e40f6af2c5fa66e1d4653df85088
+ size 4847687
Agent/Agent-2499935.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:565a60e082feaa01878798573506e1db36b137d5baea2d3a0960e048305b4704
+ size 608107
Agent/Agent-2499935.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:96c1d5d92304af04b615205602990334e2249d0d618d7e357db2f6d21de0c86a
+ size 4847687
Agent/Agent-2999856.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a200d136292ad453becb850928497cc9c40226fa938a4fea4213b1e336da0ef
+ size 608107
Agent/Agent-2999856.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:819e5836fdd8386ac3d27a8295b3b71143920a02b36b7d13cc12c60e6ce8df76
+ size 4847687
Agent/Agent-3000012.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a200d136292ad453becb850928497cc9c40226fa938a4fea4213b1e336da0ef
+ size 608107
Agent/Agent-3000012.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e997ded553e10763fcf9baffda340849ae1ea873a5e617e06919afc72f3e95c
+ size 4847687
Agent/checkpoint.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e997ded553e10763fcf9baffda340849ae1ea873a5e617e06919afc72f3e95c
+ size 4847687
Agent/events.out.tfevents.1716785041.RICHARD.39584.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87592ac0a3da71a8d3cf2b2a555c95a1b5a158a090f6229bd65c24fde2be48a5
+ size 2545199
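The events.out.tfevents file is the TensorBoard log for this run; pointing `tensorboard --logdir` at the Agent directory renders the curves interactively. For offline access, a minimal sketch assuming the `tensorboard` package is installed; scalar tag names are discovered at runtime rather than assumed:

```python
# Minimal sketch: read logged training scalars from the event file.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("Agent/events.out.tfevents.1716785041.RICHARD.39584.0")
acc.Reload()  # parse the file

tags = acc.Tags()["scalars"]
print(tags)  # e.g. cumulative reward, entropy, losses

# Print the first few points of one series.
for event in acc.Scalars(tags[0])[:5]:
    print(event.step, event.value)
```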
configuration.yaml ADDED
@@ -0,0 +1,98 @@
+ default_settings: null
+ behaviors:
+   Agent:
+     trainer_type: ppo
+     hyperparameters:
+       batch_size: 1024
+       buffer_size: 10240
+       learning_rate: 0.0003
+       beta: 0.005
+       epsilon: 0.2
+       lambd: 0.95
+       num_epoch: 3
+       shared_critic: false
+       learning_rate_schedule: linear
+       beta_schedule: linear
+       epsilon_schedule: linear
+     network_settings:
+       normalize: false
+       hidden_units: 128
+       num_layers: 2
+       vis_encode_type: resnet
+       memory: null
+       goal_conditioning_type: hyper
+       deterministic: false
+     reward_signals:
+       curiosity:
+         gamma: 0.99
+         strength: 0.1
+         network_settings:
+           normalize: false
+           hidden_units: 128
+           num_layers: 2
+           vis_encode_type: resnet
+           memory: null
+           goal_conditioning_type: hyper
+           deterministic: false
+         learning_rate: 0.0003
+         encoding_size: 256
+       extrinsic:
+         gamma: 0.99
+         strength: 0.9
+         network_settings:
+           normalize: false
+           hidden_units: 128
+           num_layers: 2
+           vis_encode_type: resnet
+           memory: null
+           goal_conditioning_type: hyper
+           deterministic: false
+     init_path: null
+     keep_checkpoints: 5
+     checkpoint_interval: 500000
+     max_steps: 3000000
+     time_horizon: 10240
+     summary_freq: 15000
+     threaded: true
+     self_play: null
+     behavioral_cloning: null
+ env_settings:
+   env_path: c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/dev_environments/Hivex_OceanPlasticCollection_win
+   env_args: null
+   base_port: 5008
+   num_envs: 1
+   num_areas: 1
+   seed: 5000
+   max_lifetime_restarts: 10
+   restarts_rate_limit_n: 1
+   restarts_rate_limit_period_s: 60
+ engine_settings:
+   width: 84
+   height: 84
+   quality_level: 5
+   time_scale: 20
+   target_frame_rate: -1
+   capture_frame_rate: 60
+   no_graphics: true
+ environment_parameters:
+   task:
+     curriculum:
+     - value:
+         sampler_type: constant
+         sampler_parameters:
+           seed: 5000
+           value: 2
+       name: task
+       completion_criteria: null
+ checkpoint_settings:
+   run_id: OceanPlasticCollection/train/OceanPlasticCollection_task_2_run_id_1_train
+   initialize_from: null
+   load_model: false
+   resume: false
+   force: false
+   train_model: false
+   inference: false
+   results_dir: results
+ torch_settings:
+   device: null
+ debug: false
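This is a standard ML-Agents PPO trainer configuration: a 3,000,000-step run whose reward blends an extrinsic signal (strength 0.9) with a curiosity signal (strength 0.1), checkpointing every 500,000 steps. A minimal sketch for reading it programmatically, assuming PyYAML is installed; the key paths follow the schema above:

```python
# Minimal sketch: load configuration.yaml and print the settings that
# drive this run. Assumes `pip install pyyaml`.
import yaml

with open("configuration.yaml") as f:
    config = yaml.safe_load(f)

agent = config["behaviors"]["Agent"]
print("trainer:", agent["trainer_type"])                      # ppo
print("batch size:", agent["hyperparameters"]["batch_size"])  # 1024
print("max steps:", agent["max_steps"])                       # 3000000

# The effective reward is a strength-weighted blend of the signals:
# 0.9 * extrinsic + 0.1 * curiosity.
for name, signal in agent["reward_signals"].items():
    print(f"{name}: gamma={signal['gamma']} strength={signal['strength']}")
```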
run_logs/timers.json ADDED
@@ -0,0 +1,447 @@
+ {
+   "name": "root",
+   "gauges": {
+     "Agent.Policy.Entropy.mean": {
+       "value": 0.5306253433227539,
+       "min": 0.49898743629455566,
+       "max": 1.7881392240524292,
+       "count": 200
+     },
+     "Agent.Policy.Entropy.sum": {
+       "value": 7941.86962890625,
+       "min": 6877.56591796875,
+       "max": 26929.376953125,
+       "count": 200
+     },
+     "Agent.Environment.EpisodeLength.mean": {
+       "value": 183.85185185185185,
+       "min": 35.369565217391305,
+       "max": 577.7777777777778,
+       "count": 200
+     },
+     "Agent.Environment.EpisodeLength.sum": {
+       "value": 14892.0,
+       "min": 12399.0,
+       "max": 17439.0,
+       "count": 200
+     },
+     "Agent.OceanPlasticCollector.GlobalReward.mean": {
+       "value": 298.25300991119315,
+       "min": 1.3685010688491916,
+       "max": 562.4681555451618,
+       "count": 200
+     },
+     "Agent.OceanPlasticCollector.GlobalReward.sum": {
+       "value": 47422.22857587971,
+       "min": 611.7199777755886,
+       "max": 69435.68168136477,
+       "count": 200
+     },
+     "Agent.OceanPlasticCollector.LocalReward.mean": {
+       "value": 157.80503144654088,
+       "min": 19.0917225950783,
+       "max": 257.9375,
+       "count": 200
+     },
+     "Agent.OceanPlasticCollector.LocalReward.sum": {
+       "value": 25091.0,
+       "min": 6873.0,
+       "max": 33543.0,
+       "count": 200
+     },
+     "Agent.Environment.LessonNumber.task.mean": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 0.0,
+       "count": 200
+     },
+     "Agent.Environment.LessonNumber.task.sum": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 0.0,
+       "count": 200
+     },
+     "Agent.Step.mean": {
+       "value": 2999856.0,
+       "min": 14995.0,
+       "max": 2999856.0,
+       "count": 200
+     },
+     "Agent.Step.sum": {
+       "value": 2999856.0,
+       "min": 14995.0,
+       "max": 2999856.0,
+       "count": 200
+     },
+     "Agent.Policy.CuriosityValueEstimate.mean": {
+       "value": 0.27030399441719055,
+       "min": -0.004683023318648338,
+       "max": 1.2223944664001465,
+       "count": 200
+     },
+     "Agent.Policy.CuriosityValueEstimate.sum": {
+       "value": 21.894624710083008,
+       "min": -1.2175860404968262,
+       "max": 118.57225799560547,
+       "count": 200
+     },
+     "Agent.Policy.ExtrinsicValueEstimate.mean": {
+       "value": 288.5648498535156,
+       "min": -8.056370735168457,
+       "max": 348.76446533203125,
+       "count": 200
+     },
+     "Agent.Policy.ExtrinsicValueEstimate.sum": {
+       "value": 23373.75390625,
+       "min": -2094.656494140625,
+       "max": 41161.15625,
+       "count": 200
+     },
+     "Agent.Environment.CumulativeReward.mean": {
+       "value": 885.9131869269006,
+       "min": -12.916335125571315,
+       "max": 1451.7606051830535,
+       "count": 200
+     },
+     "Agent.Environment.CumulativeReward.sum": {
+       "value": 71758.96814107895,
+       "min": -5321.530071735382,
+       "max": 99525.08007621765,
+       "count": 200
+     },
+     "Agent.Policy.CuriosityReward.mean": {
+       "value": 0.30991002497619685,
+       "min": 0.05206376354852058,
+       "max": 8.034196149844389,
+       "count": 200
+     },
+     "Agent.Policy.CuriosityReward.sum": {
+       "value": 25.102712023071945,
+       "min": 21.45027058199048,
+       "max": 274.9943250827491,
+       "count": 200
+     },
+     "Agent.Policy.ExtrinsicReward.mean": {
+       "value": 797.3218594009494,
+       "min": -11.624701671519325,
+       "max": 1306.5845138062823,
+       "count": 200
+     },
+     "Agent.Policy.ExtrinsicReward.sum": {
+       "value": 64583.0706114769,
+       "min": -4789.377088665962,
+       "max": 89572.56913685799,
+       "count": 200
+     },
+     "Agent.Losses.PolicyLoss.mean": {
+       "value": 0.022236945216233532,
+       "min": 0.016801928527032335,
+       "max": 0.032498174579814076,
+       "count": 200
+     },
+     "Agent.Losses.PolicyLoss.sum": {
+       "value": 0.044473890432467064,
+       "min": 0.016801928527032335,
+       "max": 0.0605002747580522,
+       "count": 200
+     },
+     "Agent.Losses.ValueLoss.mean": {
+       "value": 3485.494217936198,
+       "min": 126.44592258568966,
+       "max": 6151.284423828125,
+       "count": 200
+     },
+     "Agent.Losses.ValueLoss.sum": {
+       "value": 6970.988435872396,
+       "min": 126.62573547363282,
+       "max": 10417.76612141927,
+       "count": 200
+     },
+     "Agent.Policy.LearningRate.mean": {
+       "value": 7.701997432999959e-07,
+       "min": 7.701997432999959e-07,
+       "max": 0.00029895630034790005,
+       "count": 200
+     },
+     "Agent.Policy.LearningRate.sum": {
+       "value": 1.5403994865999917e-06,
+       "min": 1.5403994865999917e-06,
+       "max": 0.0005927607024131001,
+       "count": 200
+     },
+     "Agent.Policy.Epsilon.mean": {
+       "value": 0.10025670000000002,
+       "min": 0.10025670000000002,
+       "max": 0.19965209999999994,
+       "count": 200
+     },
+     "Agent.Policy.Epsilon.sum": {
+       "value": 0.20051340000000004,
+       "min": 0.10077589999999997,
+       "max": 0.3975869000000001,
+       "count": 200
+     },
+     "Agent.Policy.Beta.mean": {
+       "value": 2.280932999999993e-05,
+       "min": 2.280932999999993e-05,
+       "max": 0.004982639789999998,
+       "count": 200
+     },
+     "Agent.Policy.Beta.sum": {
+       "value": 4.561865999999986e-05,
+       "min": 4.561865999999986e-05,
+       "max": 0.00987958631,
+       "count": 200
+     },
+     "Agent.Losses.CuriosityForwardLoss.mean": {
+       "value": 0.01628636669677993,
+       "min": 0.015522598568350077,
+       "max": 0.1835712509850661,
+       "count": 200
+     },
+     "Agent.Losses.CuriosityForwardLoss.sum": {
+       "value": 0.03257273339355986,
+       "min": 0.015905051957815886,
+       "max": 0.33890696118275326,
+       "count": 200
+     },
+     "Agent.Losses.CuriosityInverseLoss.mean": {
+       "value": 0.1593244475622972,
+       "min": 0.12232522579530875,
+       "max": 1.7913235346476237,
+       "count": 200
+     },
+     "Agent.Losses.CuriosityInverseLoss.sum": {
+       "value": 0.3186488951245944,
+       "min": 0.12283845146497091,
+       "max": 3.0615293025970463,
+       "count": 200
+     },
+     "Agent.IsTraining.mean": {
+       "value": 1.0,
+       "min": 1.0,
+       "max": 1.0,
+       "count": 200
+     },
+     "Agent.IsTraining.sum": {
+       "value": 1.0,
+       "min": 1.0,
+       "max": 1.0,
+       "count": 200
+     }
+   },
+   "metadata": {
+     "timer_format_version": "0.1.0",
+     "start_time_seconds": "1716785038",
+     "python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
+     "command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/OceanPlasticCollection_task_2_run_id_1_train.yaml --run-id=OceanPlasticCollection/train/OceanPlasticCollection_task_2_run_id_1_train --base-port 5008",
+     "mlagents_version": "0.30.0",
+     "mlagents_envs_version": "0.30.0",
+     "communication_protocol_version": "1.5.0",
+     "pytorch_version": "1.7.1+cu110",
+     "numpy_version": "1.21.0",
+     "end_time_seconds": "1716811251"
+   },
+   "total": 26213.0630382,
+   "count": 1,
+   "self": 0.7105160000028263,
+   "children": {
+     "run_training.setup": {
+       "total": 0.05819149999999995,
+       "count": 1,
+       "self": 0.05819149999999995
+     },
+     "TrainerController.start_learning": {
+       "total": 26212.2943307,
+       "count": 1,
+       "self": 23.690154799714946,
+       "children": {
+         "TrainerController._reset_env": {
+           "total": 3.7668695000000003,
+           "count": 1,
+           "self": 3.7668695000000003
+         },
+         "TrainerController.advance": {
+           "total": 26184.666235600285,
+           "count": 1004516,
+           "self": 20.54277140076374,
+           "children": {
+             "env_step": {
+               "total": 26164.12346419952,
+               "count": 1004516,
+               "self": 17152.511751000777,
+               "children": {
+                 "SubprocessEnvManager._take_step": {
+                   "total": 8998.20145619884,
+                   "count": 1004516,
+                   "self": 43.404690699311686,
+                   "children": {
+                     "TorchPolicy.evaluate": {
+                       "total": 8954.796765499528,
+                       "count": 1000011,
+                       "self": 8954.796765499528
+                     }
+                   }
+                 },
+                 "workers": {
+                   "total": 13.410256999903122,
+                   "count": 1004516,
+                   "self": 0.0,
+                   "children": {
+                     "worker_root": {
+                       "total": 26186.88637710276,
+                       "count": 1004516,
+                       "is_parallel": true,
+                       "self": 9875.071289401678,
+                       "children": {
+                         "steps_from_proto": {
+                           "total": 0.00048389999999987054,
+                           "count": 1,
+                           "is_parallel": true,
+                           "self": 0.00012210000000045795,
+                           "children": {
+                             "_process_maybe_compressed_observation": {
+                               "total": 0.00025109999999983756,
+                               "count": 2,
+                               "is_parallel": true,
+                               "self": 3.3800000000194785e-05,
+                               "children": {
+                                 "_observation_to_np_array": {
+                                   "total": 0.00021729999999964278,
+                                   "count": 3,
+                                   "is_parallel": true,
+                                   "self": 0.00021729999999964278
+                                 }
+                               }
+                             },
+                             "_process_rank_one_or_two_observation": {
+                               "total": 0.00011069999999957503,
+                               "count": 2,
+                               "is_parallel": true,
+                               "self": 0.00011069999999957503
+                             }
+                           }
+                         },
+                         "UnityEnvironment.step": {
+                           "total": 16311.814603801084,
+                           "count": 1004516,
+                           "is_parallel": true,
+                           "self": 121.67509509960837,
+                           "children": {
+                             "UnityEnvironment._generate_step_input": {
+                               "total": 61.15591220182779,
+                               "count": 1004516,
+                               "is_parallel": true,
+                               "self": 61.15591220182779
+                             },
+                             "communicator.exchange": {
+                               "total": 15674.75046439934,
+                               "count": 1004516,
+                               "is_parallel": true,
+                               "self": 15674.75046439934
+                             },
+                             "steps_from_proto": {
+                               "total": 454.2331321003083,
+                               "count": 1004516,
+                               "is_parallel": true,
+                               "self": 106.32643739959605,
+                               "children": {
+                                 "_process_maybe_compressed_observation": {
+                                   "total": 287.21077120065667,
+                                   "count": 2009032,
+                                   "is_parallel": true,
+                                   "self": 37.419722703589116,
+                                   "children": {
+                                     "_observation_to_np_array": {
+                                       "total": 249.79104849706755,
+                                       "count": 3016701,
+                                       "is_parallel": true,
+                                       "self": 249.79104849706755
+                                     }
+                                   }
+                                 },
+                                 "_process_rank_one_or_two_observation": {
+                                   "total": 60.695923500055606,
+                                   "count": 2009032,
+                                   "is_parallel": true,
+                                   "self": 60.695923500055606
+                                 }
+                               }
+                             }
+                           }
+                         }
+                       }
+                     }
+                   }
+                 }
+               }
+             }
+           }
+         },
+         "trainer_threads": {
+           "total": 3.369999831193127e-05,
+           "count": 1,
+           "self": 3.369999831193127e-05,
+           "children": {
+             "thread_root": {
+               "total": 0.0,
+               "count": 0,
+               "is_parallel": true,
+               "self": 0.0,
+               "children": {
+                 "trainer_advance": {
+                   "total": 26187.903691299587,
+                   "count": 1386719,
+                   "is_parallel": true,
+                   "self": 56.37880670134837,
+                   "children": {
+                     "process_trajectory": {
+                       "total": 21975.25524539823,
+                       "count": 1386719,
+                       "is_parallel": true,
+                       "self": 21973.72103539823,
+                       "children": {
+                         "RLTrainer._checkpoint": {
+                           "total": 1.5342100000025312,
+                           "count": 6,
+                           "is_parallel": true,
+                           "self": 1.5342100000025312
+                         }
+                       }
+                     },
+                     "_update_policy": {
+                       "total": 4156.269639200007,
+                       "count": 277,
+                       "is_parallel": true,
+                       "self": 2784.3974990999604,
+                       "children": {
+                         "TorchPPOOptimizer.update": {
+                           "total": 1371.8721401000469,
+                           "count": 8490,
+                           "is_parallel": true,
+                           "self": 1371.8721401000469
+                         }
+                       }
+                     }
+                   }
+                 }
+               }
+             }
+           }
+         },
+         "TrainerController._save_models": {
+           "total": 0.17103710000083083,
+           "count": 1,
+           "self": 0.00504930000170134,
+           "children": {
+             "RLTrainer._checkpoint": {
+               "total": 0.1659877999991295,
+               "count": 1,
+               "self": 0.1659877999991295
+             }
+           }
+         }
+       }
+     }
+   }
+ }
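timers.json is the hierarchical profiler dump ML-Agents writes at the end of training: each node records total wall-clock seconds, a call count, self time, and child timers. A minimal sketch that walks the tree, using only the field names visible above:

```python
# Minimal sketch: walk the timer tree and show where wall-clock time went.
import json

with open("run_logs/timers.json") as f:
    root = json.load(f)

def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.1f}s "
          f"({node.get('count', 0)} calls)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(root)
# In this run, communicator.exchange accounts for ~15675s of the ~26213s
# total, i.e. most time is spent waiting on the Unity environment.
```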
run_logs/training_status.json ADDED
@@ -0,0 +1,68 @@
+ {
+   "task": {
+     "lesson_num": 0
+   },
+   "Agent": {
+     "checkpoints": [
+       {
+         "steps": 1499962,
+         "file_path": "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_2_run_id_1_train\\Agent\\Agent-1499962.onnx",
+         "reward": 837.7376002502441,
+         "creation_time": 1716798196.4994848,
+         "auxillary_file_paths": [
+           "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_2_run_id_1_train\\Agent\\Agent-1499962.pt"
+         ]
+       },
+       {
+         "steps": 1999995,
+         "file_path": "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_2_run_id_1_train\\Agent\\Agent-1999995.onnx",
+         "reward": 1030.3363168080648,
+         "creation_time": 1716802793.6793845,
+         "auxillary_file_paths": [
+           "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_2_run_id_1_train\\Agent\\Agent-1999995.pt"
+         ]
+       },
+       {
+         "steps": 2499935,
+         "file_path": "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_2_run_id_1_train\\Agent\\Agent-2499935.onnx",
+         "reward": 1108.6876445097082,
+         "creation_time": 1716807131.4435964,
+         "auxillary_file_paths": [
+           "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_2_run_id_1_train\\Agent\\Agent-2499935.pt"
+         ]
+       },
+       {
+         "steps": 2999856,
+         "file_path": "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_2_run_id_1_train\\Agent\\Agent-2999856.onnx",
+         "reward": 689.179988179888,
+         "creation_time": 1716811250.2222645,
+         "auxillary_file_paths": [
+           "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_2_run_id_1_train\\Agent\\Agent-2999856.pt"
+         ]
+       },
+       {
+         "steps": 3000012,
+         "file_path": "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_2_run_id_1_train\\Agent\\Agent-3000012.onnx",
+         "reward": 659.4166553497314,
+         "creation_time": 1716811250.426181,
+         "auxillary_file_paths": [
+           "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_2_run_id_1_train\\Agent\\Agent-3000012.pt"
+         ]
+       }
+     ],
+     "final_checkpoint": {
+       "steps": 3000012,
+       "file_path": "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_2_run_id_1_train\\Agent.onnx",
+       "reward": 659.4166553497314,
+       "creation_time": 1716811250.426181,
+       "auxillary_file_paths": [
+         "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_2_run_id_1_train\\Agent\\Agent-3000012.pt"
+       ]
+     }
+   },
+   "metadata": {
+     "stats_format_version": "0.3.0",
+     "mlagents_version": "0.30.0",
+     "torch_version": "1.7.1+cu110"
+   }
+ }
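training_status.json records the checkpoint rotation (keep_checkpoints: 5, one roughly every 500k steps) plus the final export. A minimal sketch that picks the best checkpoint by recorded reward, keeping the file's own key names (including the upstream "auxillary_file_paths" spelling):

```python
# Minimal sketch: find the highest-reward checkpoint of this run.
import json

with open("run_logs/training_status.json") as f:
    status = json.load(f)

best = max(status["Agent"]["checkpoints"], key=lambda c: c["reward"])
print(best["steps"], round(best["reward"], 2), best["file_path"])
# -> 2499935 1108.69 results\...\Agent-2499935.onnx
```

Note that the recorded reward peaks at step 2,499,935 (about 1108.7) and falls to about 659.4 by the final export, so the mid-run snapshot above may be the stronger policy to deploy.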