{ "name": "root", "gauges": { "SnowballTarget.Policy.Entropy.mean": { "value": 1.0697780847549438, "min": 1.0697780847549438, "max": 2.8683438301086426, "count": 20 }, "SnowballTarget.Policy.Entropy.sum": { "value": 10226.0087890625, "min": 10226.0087890625, "max": 29343.15625, "count": 20 }, "SnowballTarget.Step.mean": { "value": 199984.0, "min": 9952.0, "max": 199984.0, "count": 20 }, "SnowballTarget.Step.sum": { "value": 199984.0, "min": 9952.0, "max": 199984.0, "count": 20 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": { "value": 12.553525924682617, "min": 0.3380628824234009, "max": 12.553525924682617, "count": 20 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": { "value": 2447.9375, "min": 65.58419799804688, "max": 2522.19921875, "count": 20 }, "SnowballTarget.Environment.EpisodeLength.mean": { "value": 199.0, "min": 199.0, "max": 199.0, "count": 20 }, "SnowballTarget.Environment.EpisodeLength.sum": { "value": 8756.0, "min": 8756.0, "max": 10945.0, "count": 20 }, "SnowballTarget.Losses.PolicyLoss.mean": { "value": 0.06195714252152909, "min": 0.056727134894040944, "max": 0.07375957315968003, "count": 20 }, "SnowballTarget.Losses.PolicyLoss.sum": { "value": 0.24782857008611636, "min": 0.24782857008611636, "max": 0.36586041195208535, "count": 20 }, "SnowballTarget.Losses.ValueLoss.mean": { "value": 0.21091891416147643, "min": 0.09762589579594194, "max": 0.269247799016097, "count": 20 }, "SnowballTarget.Losses.ValueLoss.sum": { "value": 0.8436756566459057, "min": 0.39050358318376777, "max": 1.2695586561572316, "count": 20 }, "SnowballTarget.Policy.LearningRate.mean": { "value": 8.082097306000005e-06, "min": 8.082097306000005e-06, "max": 0.000291882002706, "count": 20 }, "SnowballTarget.Policy.LearningRate.sum": { "value": 3.232838922400002e-05, "min": 3.232838922400002e-05, "max": 0.00138516003828, "count": 20 }, "SnowballTarget.Policy.Epsilon.mean": { "value": 0.10269400000000001, "min": 0.10269400000000001, "max": 0.19729400000000002, "count": 20 }, "SnowballTarget.Policy.Epsilon.sum": { "value": 0.41077600000000003, "min": 0.41077600000000003, "max": 0.96172, "count": 20 }, "SnowballTarget.Policy.Beta.mean": { "value": 0.0001444306000000001, "min": 0.0001444306000000001, "max": 0.0048649706, "count": 20 }, "SnowballTarget.Policy.Beta.sum": { "value": 0.0005777224000000004, "min": 0.0005777224000000004, "max": 0.023089828, "count": 20 }, "SnowballTarget.Environment.CumulativeReward.mean": { "value": 25.40909090909091, "min": 2.727272727272727, "max": 25.40909090909091, "count": 20 }, "SnowballTarget.Environment.CumulativeReward.sum": { "value": 1118.0, "min": 120.0, "max": 1347.0, "count": 20 }, "SnowballTarget.Policy.ExtrinsicReward.mean": { "value": 25.40909090909091, "min": 2.727272727272727, "max": 25.40909090909091, "count": 20 }, "SnowballTarget.Policy.ExtrinsicReward.sum": { "value": 1118.0, "min": 120.0, "max": 1347.0, "count": 20 }, "SnowballTarget.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 20 }, "SnowballTarget.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 20 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1679667973", "python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": 
"1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1679668443" }, "total": 469.563169039, "count": 1, "self": 0.4850755740000636, "children": { "run_training.setup": { "total": 0.10737556600000175, "count": 1, "self": 0.10737556600000175 }, "TrainerController.start_learning": { "total": 468.97071789899996, "count": 1, "self": 0.603384242987886, "children": { "TrainerController._reset_env": { "total": 9.840860354, "count": 1, "self": 9.840860354 }, "TrainerController.advance": { "total": 458.3059869790122, "count": 18215, "self": 0.3004455530094674, "children": { "env_step": { "total": 458.0055414260027, "count": 18215, "self": 332.59177673899796, "children": { "SubprocessEnvManager._take_step": { "total": 125.11688668900226, "count": 18215, "self": 2.123206571005994, "children": { "TorchPolicy.evaluate": { "total": 122.99368011799626, "count": 18215, "self": 122.99368011799626 } } }, "workers": { "total": 0.29687799800248627, "count": 18215, "self": 0.0, "children": { "worker_root": { "total": 467.39183227600336, "count": 18215, "is_parallel": true, "self": 220.5094537810021, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.005961478999978453, "count": 1, "is_parallel": true, "self": 0.004503338999995776, "children": { "_process_rank_one_or_two_observation": { "total": 0.0014581399999826772, "count": 10, "is_parallel": true, "self": 0.0014581399999826772 } } }, "UnityEnvironment.step": { "total": 0.0823367750000159, "count": 1, "is_parallel": true, "self": 0.0006263060000151199, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00043697699999256656, "count": 1, "is_parallel": true, "self": 0.00043697699999256656 }, "communicator.exchange": { "total": 0.07732304500001419, "count": 1, "is_parallel": true, "self": 0.07732304500001419 }, "steps_from_proto": { "total": 0.003950446999994028, "count": 1, "is_parallel": true, "self": 0.0004171950000397828, "children": { "_process_rank_one_or_two_observation": { "total": 0.0035332519999542455, "count": 10, "is_parallel": true, "self": 0.0035332519999542455 } } } } } } }, "UnityEnvironment.step": { "total": 246.88237849500126, "count": 18214, "is_parallel": true, "self": 9.676320460013756, "children": { "UnityEnvironment._generate_step_input": { "total": 5.123005547004794, "count": 18214, "is_parallel": true, "self": 5.123005547004794 }, "communicator.exchange": { "total": 200.71699000099255, "count": 18214, "is_parallel": true, "self": 200.71699000099255 }, "steps_from_proto": { "total": 31.36606248699016, "count": 18214, "is_parallel": true, "self": 6.403781661980588, "children": { "_process_rank_one_or_two_observation": { "total": 24.96228082500957, "count": 182140, "is_parallel": true, "self": 24.96228082500957 } } } } } } } } } } } } }, "trainer_threads": { "total": 0.00014572199995654955, "count": 1, "self": 0.00014572199995654955, "children": { "thread_root": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "trainer_advance": { "total": 455.09048637799674, "count": 404521, "is_parallel": true, "self": 9.865429724993874, "children": { "process_trajectory": { "total": 253.57972287200235, "count": 404521, "is_parallel": true, "self": 252.41019897900233, "children": { "RLTrainer._checkpoint": { "total": 1.1695238930000187, "count": 4, "is_parallel": true, "self": 1.1695238930000187 } } }, "_update_policy": { "total": 191.6453337810005, "count": 90, "is_parallel": 
true, "self": 68.70631199399975, "children": { "TorchPPOOptimizer.update": { "total": 122.93902178700074, "count": 4587, "is_parallel": true, "self": 122.93902178700074 } } } } } } } } }, "TrainerController._save_models": { "total": 0.22034060099997532, "count": 1, "self": 0.0011415860000170142, "children": { "RLTrainer._checkpoint": { "total": 0.2191990149999583, "count": 1, "self": 0.2191990149999583 } } } } } } }