{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.401753544807434, "min": 1.401753544807434, "max": 1.4274451732635498, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 68635.4609375, "min": 68132.1875, "max": 76965.4921875, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 103.3945720250522, "min": 88.09269162210339, "max": 374.44776119402985, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49526.0, "min": 48929.0, "max": 50387.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999939.0, "min": 49560.0, "max": 1999939.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999939.0, "min": 49560.0, "max": 1999939.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.340867042541504, "min": 0.08379607647657394, "max": 2.42789888381958, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1121.2752685546875, "min": 11.144878387451172, "max": 1334.834228515625, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.5715689065560916, "min": 1.7234920630777688, "max": 3.9087947966285452, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 1710.781506240368, "min": 229.22444438934326, "max": 2100.6433007121086, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.5715689065560916, "min": 1.7234920630777688, "max": 3.9087947966285452, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 1710.781506240368, "min": 229.22444438934326, "max": 2100.6433007121086, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.016197532452659088, "min": 0.014020869886735454, "max": 0.023250225720888314, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.04859259735797726, "min": 0.02804173977347091, "max": 0.05697563168747971, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.04702861487037605, "min": 0.022123388325174652, "max": 0.05842035487294197, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.14108584461112816, "min": 0.044246776650349304, "max": 0.1752610646188259, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.3935988688333272e-06, "min": 3.3935988688333272e-06, "max": 0.000295353676548775, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.0180796606499981e-05, "min": 1.0180796606499981e-05, "max": 0.0008441047686317499, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.1011311666666667, "min": 0.1011311666666667, "max": 0.19845122499999998, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.3033935000000001, "min": 0.20742845000000004, "max": 0.5813682500000001, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 6.644521666666657e-05, "min": 6.644521666666657e-05, "max": 0.004922716127500001, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00019933564999999974, "min": 0.00019933564999999974, "max": 0.014070275675, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1670442174", "python_version": "3.8.15 (default, Oct 12 2022, 19:14:39) \n[GCC 7.5.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.29.0.dev0", "mlagents_envs_version": "0.29.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.8.1+cu102", "numpy_version": "1.21.6", 
"end_time_seconds": "1670444704" }, "total": 2529.9352766380002, "count": 1, "self": 0.7919310470001619, "children": { "run_training.setup": { "total": 0.11330050799995206, "count": 1, "self": 0.11330050799995206 }, "TrainerController.start_learning": { "total": 2529.030045083, "count": 1, "self": 4.40418245599767, "children": { "TrainerController._reset_env": { "total": 11.646293401000094, "count": 1, "self": 11.646293401000094 }, "TrainerController.advance": { "total": 2512.7795662920025, "count": 231530, "self": 4.891584421027801, "children": { "env_step": { "total": 1994.8559701000204, "count": 231530, "self": 1670.1736234200966, "children": { "SubprocessEnvManager._take_step": { "total": 321.7061777990675, "count": 231530, "self": 16.4057203290306, "children": { "TorchPolicy.evaluate": { "total": 305.3004574700369, "count": 222902, "self": 74.69308086311844, "children": { "TorchPolicy.sample_actions": { "total": 230.60737660691848, "count": 222902, "self": 230.60737660691848 } } } } }, "workers": { "total": 2.976168880856221, "count": 231530, "self": 0.0, "children": { "worker_root": { "total": 2520.1656593028856, "count": 231530, "is_parallel": true, "self": 1153.8243419689318, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.002162259999977323, "count": 1, "is_parallel": true, "self": 0.00036496199993507616, "children": { "_process_rank_one_or_two_observation": { "total": 0.0017972980000422467, "count": 2, "is_parallel": true, "self": 0.0017972980000422467 } } }, "UnityEnvironment.step": { "total": 0.03039566500001456, "count": 1, "is_parallel": true, "self": 0.0003121170000213169, "children": { "UnityEnvironment._generate_step_input": { "total": 0.000206573000014032, "count": 1, "is_parallel": true, "self": 0.000206573000014032 }, "communicator.exchange": { "total": 0.02908753700012312, "count": 1, "is_parallel": true, "self": 0.02908753700012312 }, "steps_from_proto": { "total": 0.0007894379998560908, "count": 1, "is_parallel": true, "self": 0.0002865869998913695, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005028509999647213, "count": 2, "is_parallel": true, "self": 0.0005028509999647213 } } } } } } }, "UnityEnvironment.step": { "total": 1366.3413173339538, "count": 231529, "is_parallel": true, "self": 37.07655427009968, "children": { "UnityEnvironment._generate_step_input": { "total": 86.41799132604001, "count": 231529, "is_parallel": true, "self": 86.41799132604001 }, "communicator.exchange": { "total": 1139.9299641389337, "count": 231529, "is_parallel": true, "self": 1139.9299641389337 }, "steps_from_proto": { "total": 102.9168075988805, "count": 231529, "is_parallel": true, "self": 44.59458737481964, "children": { "_process_rank_one_or_two_observation": { "total": 58.32222022406086, "count": 463058, "is_parallel": true, "self": 58.32222022406086 } } } } } } } } } } }, "trainer_advance": { "total": 513.0320117709541, "count": 231530, "self": 6.90957543400782, "children": { "process_trajectory": { "total": 167.71098176994542, "count": 231530, "self": 167.07109675794595, "children": { "RLTrainer._checkpoint": { "total": 0.6398850119994677, "count": 4, "self": 0.6398850119994677 } } }, "_update_policy": { "total": 338.4114545670009, "count": 97, "self": 282.6998120100111, "children": { "TorchPPOOptimizer.update": { "total": 55.71164255698977, "count": 2910, "self": 55.71164255698977 } } } } } } }, "trainer_threads": { "total": 1.416000031895237e-06, "count": 1, 
"self": 1.416000031895237e-06 }, "TrainerController._save_models": { "total": 0.2000015179996808, "count": 1, "self": 0.0026883639998231956, "children": { "RLTrainer._checkpoint": { "total": 0.1973131539998576, "count": 1, "self": 0.1973131539998576 } } } } } } }