{
    "name": "root",
    "gauges": {
        "Pyramids.Policy.Entropy.mean": {
            "value": 0.6478382349014282,
            "min": 0.6478382349014282,
            "max": 1.501163363456726,
            "count": 20
        },
        "Pyramids.Policy.Entropy.sum": {
            "value": 19414.416015625,
            "min": 19414.416015625,
            "max": 45539.29296875,
            "count": 20
        },
        "Pyramids.Step.mean": {
            "value": 599938.0,
            "min": 29952.0,
            "max": 599938.0,
            "count": 20
        },
        "Pyramids.Step.sum": {
            "value": 599938.0,
            "min": 29952.0,
            "max": 599938.0,
            "count": 20
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
            "value": 0.18724876642227173,
            "min": -0.18214330077171326,
            "max": 0.18724876642227173,
            "count": 20
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
            "value": 47.561187744140625,
            "min": -43.16796112060547,
            "max": 47.561187744140625,
            "count": 20
        },
        "Pyramids.Policy.RndValueEstimate.mean": {
            "value": 0.005261237267404795,
            "min": 0.005261237267404795,
            "max": 0.3343900442123413,
            "count": 20
        },
        "Pyramids.Policy.RndValueEstimate.sum": {
            "value": 1.3363542556762695,
            "min": 1.3363542556762695,
            "max": 79.25044250488281,
            "count": 20
        },
        "Pyramids.Losses.PolicyLoss.mean": {
            "value": 0.06920473634541155,
            "min": 0.06683283463617834,
            "max": 0.07195383903981176,
            "count": 20
        },
        "Pyramids.Losses.PolicyLoss.sum": {
            "value": 0.9688663088357617,
            "min": 0.47693756886074806,
            "max": 1.0728707710659366,
            "count": 20
        },
        "Pyramids.Losses.ValueLoss.mean": {
            "value": 0.011946729921744139,
            "min": 0.0008682443716028568,
            "max": 0.011946729921744139,
            "count": 20
        },
        "Pyramids.Losses.ValueLoss.sum": {
            "value": 0.16725421890441794,
            "min": 0.0060777106012199975,
            "max": 0.16725421890441794,
            "count": 20
        },
        "Pyramids.Policy.LearningRate.mean": {
            "value": 7.054847648416671e-06,
            "min": 7.054847648416671e-06,
            "max": 0.0002919177169798095,
            "count": 20
        },
        "Pyramids.Policy.LearningRate.sum": {
            "value": 9.876786707783339e-05,
            "min": 9.876786707783339e-05,
            "max": 0.003045879384707,
            "count": 20
        },
        "Pyramids.Policy.Epsilon.mean": {
            "value": 0.10235158333333332,
            "min": 0.10235158333333332,
            "max": 0.1973059047619048,
            "count": 20
        },
        "Pyramids.Policy.Epsilon.sum": {
            "value": 1.4329221666666665,
            "min": 1.3485226666666668,
            "max": 2.4152929999999997,
            "count": 20
        },
        "Pyramids.Policy.Beta.mean": {
            "value": 0.00024492317500000003,
            "min": 0.00024492317500000003,
            "max": 0.009730859885714286,
            "count": 20
        },
        "Pyramids.Policy.Beta.sum": {
            "value": 0.0034289244500000007,
            "min": 0.0034289244500000007,
            "max": 0.10156777070000002,
            "count": 20
        },
        "Pyramids.Losses.RNDLoss.mean": {
            "value": 0.016863195225596428,
            "min": 0.016720183193683624,
            "max": 0.3915402889251709,
            "count": 20
        },
        "Pyramids.Losses.RNDLoss.sum": {
            "value": 0.2360847443342209,
            "min": 0.2360847443342209,
            "max": 2.7407820224761963,
            "count": 20
        },
        "Pyramids.Environment.EpisodeLength.mean": {
            "value": 587.530612244898,
            "min": 587.530612244898,
            "max": 999.0,
            "count": 20
        },
        "Pyramids.Environment.EpisodeLength.sum": {
            "value": 28789.0,
            "min": 15984.0,
            "max": 32936.0,
            "count": 20
        },
        "Pyramids.Environment.CumulativeReward.mean": {
            "value": 1.0041754768819224,
            "min": -1.0000000521540642,
            "max": 1.0041754768819224,
            "count": 20
        },
        "Pyramids.Environment.CumulativeReward.sum": {
            "value": 49.2045983672142,
            "min": -32.000001668930054,
            "max": 49.2045983672142,
            "count": 20
        },
        "Pyramids.Policy.ExtrinsicReward.mean": {
            "value": 1.0041754768819224,
            "min": -1.0000000521540642,
            "max": 1.0041754768819224,
            "count": 20
        },
        "Pyramids.Policy.ExtrinsicReward.sum": {
            "value": 49.2045983672142,
            "min": -32.000001668930054,
            "max": 49.2045983672142,
            "count": 20
        },
        "Pyramids.Policy.RndReward.mean": {
            "value": 0.1021522772094539,
            "min": 0.1021522772094539,
            "max": 7.710925901308656,
            "count": 20
        },
        "Pyramids.Policy.RndReward.sum": {
            "value": 5.005461583263241,
            "min": 5.005461583263241,
            "max": 123.37481442093849,
            "count": 20
        },
        "Pyramids.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 20
        },
        "Pyramids.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 20
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1714050857",
        "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
        "mlagents_version": "1.1.0.dev0",
        "mlagents_envs_version": "1.1.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "2.2.1+cu121",
        "numpy_version": "1.23.5",
        "end_time_seconds": "1714052892"
    },
    "total": 2035.5195693849998,
    "count": 1,
    "self": 1.170947453999588,
    "children": {
        "run_training.setup": {
            "total": 0.0873782520000077,
            "count": 1,
            "self": 0.0873782520000077
        },
        "TrainerController.start_learning": {
            "total": 2034.2612436790002,
            "count": 1,
            "self": 1.4362915860294834,
            "children": {
                "TrainerController._reset_env": {
                    "total": 2.5098585850000745,
                    "count": 1,
                    "self": 2.5098585850000745
                },
                "TrainerController.advance": {
                    "total": 2030.1492163069706,
                    "count": 37937,
                    "self": 1.71343659389413,
                    "children": {
                        "env_step": {
                            "total": 1311.7210331760184,
                            "count": 37937,
                            "self": 1202.3931517710591,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 108.37235794696221,
                                    "count": 37937,
                                    "self": 5.114811040994255,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 103.25754690596796,
                                            "count": 37559,
                                            "self": 103.25754690596796
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 0.9555234579970602,
                                    "count": 37937,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 2029.6379632149115,
                                            "count": 37937,
                                            "is_parallel": true,
                                            "self": 951.950502307945,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.0022997490000307153,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0006597380001949205,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.0016400109998357948,
                                                                    "count": 8,
                                                                    "is_parallel": true,
                                                                    "self": 0.0016400109998357948
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.06654963899995892,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0008212950001507124,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.0006649509998624126,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0006649509998624126
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.0628841889999876,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0628841889999876
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.0021792039999581903,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.000498880999884932,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.0016803230000732583,
                                                                            "count": 8,
                                                                            "is_parallel": true,
                                                                            "self": 0.0016803230000732583
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 1077.6874609069664,
                                                    "count": 37936,
                                                    "is_parallel": true,
                                                    "self": 32.51179417788785,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 17.66922213103976,
                                                            "count": 37936,
                                                            "is_parallel": true,
                                                            "self": 17.66922213103976
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 944.2755419109951,
                                                            "count": 37936,
                                                            "is_parallel": true,
                                                            "self": 944.2755419109951
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 83.23090268704368,
                                                            "count": 37936,
                                                            "is_parallel": true,
                                                            "self": 17.833601884062773,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 65.39730080298091,
                                                                    "count": 303488,
                                                                    "is_parallel": true,
                                                                    "self": 65.39730080298091
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        },
                        "trainer_advance": {
                            "total": 716.7147465370581,
                            "count": 37937,
                            "self": 2.7046983360226022,
                            "children": {
                                "process_trajectory": {
                                    "total": 110.84748384203613,
                                    "count": 37937,
                                    "self": 110.71504253303647,
                                    "children": {
                                        "RLTrainer._checkpoint": {
                                            "total": 0.13244130899965967,
                                            "count": 1,
                                            "self": 0.13244130899965967
                                        }
                                    }
                                },
                                "_update_policy": {
                                    "total": 603.1625643589994,
                                    "count": 258,
                                    "self": 239.17001531900723,
                                    "children": {
                                        "TorchPPOOptimizer.update": {
                                            "total": 363.9925490399921,
                                            "count": 13689,
                                            "self": 363.9925490399921
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 1.4830002328380942e-06,
                    "count": 1,
                    "self": 1.4830002328380942e-06
                },
                "TrainerController._save_models": {
                    "total": 0.1658757179998247,
                    "count": 1,
                    "self": 0.005956058999800007,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.1599196590000247,
                            "count": 1,
                            "self": 0.1599196590000247
                        }
                    }
                }
            }
        }
    }
}