{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4070881605148315,
"min": 1.4070881605148315,
"max": 1.4296834468841553,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70216.515625,
"min": 68554.0625,
"max": 78521.4765625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 72.0919708029197,
"min": 71.23809523809524,
"max": 396.24603174603175,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49383.0,
"min": 49054.0,
"max": 49927.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999947.0,
"min": 49455.0,
"max": 1999947.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999947.0,
"min": 49455.0,
"max": 1999947.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.5009264945983887,
"min": 0.1595236361026764,
"max": 2.517597198486328,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1713.1346435546875,
"min": 19.940454483032227,
"max": 1713.1346435546875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9040863002303743,
"min": 1.6616817677021027,
"max": 4.009075381012138,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2674.2991156578064,
"min": 207.71022096276283,
"max": 2682.760626554489,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9040863002303743,
"min": 1.6616817677021027,
"max": 4.009075381012138,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2674.2991156578064,
"min": 207.71022096276283,
"max": 2682.760626554489,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017291983664553198,
"min": 0.013249979090121794,
"max": 0.020139412732937165,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.051875950993659596,
"min": 0.028985246579547187,
"max": 0.060418238198811494,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06403110242552228,
"min": 0.019692322518676517,
"max": 0.06403110242552228,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.19209330727656682,
"min": 0.03938464503735303,
"max": 0.19209330727656682,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.888498703866668e-06,
"min": 3.888498703866668e-06,
"max": 0.0002953308765563749,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1665496111600005e-05,
"min": 1.1665496111600005e-05,
"max": 0.0008441634186122,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10129613333333333,
"min": 0.10129613333333333,
"max": 0.198443625,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3038884,
"min": 0.2077177,
"max": 0.5813878,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.467705333333337e-05,
"min": 7.467705333333337e-05,
"max": 0.004922336887500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022403116000000012,
"min": 0.00022403116000000012,
"max": 0.01407125122,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671739245",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671741505"
},
"total": 2259.696396846,
"count": 1,
"self": 0.38984296299986454,
"children": {
"run_training.setup": {
"total": 0.10587601600002472,
"count": 1,
"self": 0.10587601600002472
},
"TrainerController.start_learning": {
"total": 2259.200677867,
"count": 1,
"self": 3.936596392060892,
"children": {
"TrainerController._reset_env": {
"total": 7.572809459000041,
"count": 1,
"self": 7.572809459000041
},
"TrainerController.advance": {
"total": 2247.5761098059393,
"count": 232990,
"self": 4.130664601839726,
"children": {
"env_step": {
"total": 1779.6838272370892,
"count": 232990,
"self": 1493.417048194201,
"children": {
"SubprocessEnvManager._take_step": {
"total": 283.6415135059551,
"count": 232990,
"self": 14.587590795875371,
"children": {
"TorchPolicy.evaluate": {
"total": 269.05392271007975,
"count": 222878,
"self": 67.1786046720365,
"children": {
"TorchPolicy.sample_actions": {
"total": 201.87531803804325,
"count": 222878,
"self": 201.87531803804325
}
}
}
}
},
"workers": {
"total": 2.625265536933057,
"count": 232990,
"self": 0.0,
"children": {
"worker_root": {
"total": 2251.316164096019,
"count": 232990,
"is_parallel": true,
"self": 1021.8331281100254,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020334899999170375,
"count": 1,
"is_parallel": true,
"self": 0.00034157199991113885,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016919180000058986,
"count": 2,
"is_parallel": true,
"self": 0.0016919180000058986
}
}
},
"UnityEnvironment.step": {
"total": 0.028143080000063492,
"count": 1,
"is_parallel": true,
"self": 0.00028246900012618426,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001826239999900281,
"count": 1,
"is_parallel": true,
"self": 0.0001826239999900281
},
"communicator.exchange": {
"total": 0.02692541099997925,
"count": 1,
"is_parallel": true,
"self": 0.02692541099997925
},
"steps_from_proto": {
"total": 0.0007525759999680304,
"count": 1,
"is_parallel": true,
"self": 0.00026278600000750885,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004897899999605215,
"count": 2,
"is_parallel": true,
"self": 0.0004897899999605215
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1229.4830359859934,
"count": 232989,
"is_parallel": true,
"self": 35.03736436917052,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.0676598829383,
"count": 232989,
"is_parallel": true,
"self": 79.0676598829383
},
"communicator.exchange": {
"total": 1018.9770998639249,
"count": 232989,
"is_parallel": true,
"self": 1018.9770998639249
},
"steps_from_proto": {
"total": 96.4009118699596,
"count": 232989,
"is_parallel": true,
"self": 41.62924328994427,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.77166858001533,
"count": 465978,
"is_parallel": true,
"self": 54.77166858001533
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 463.76161796701035,
"count": 232990,
"self": 6.135275802009232,
"children": {
"process_trajectory": {
"total": 152.7127968470005,
"count": 232990,
"self": 151.5331891440004,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1796077030001015,
"count": 10,
"self": 1.1796077030001015
}
}
},
"_update_policy": {
"total": 304.9135453180006,
"count": 97,
"self": 252.73040400599473,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.18314131200589,
"count": 2910,
"self": 52.18314131200589
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.96999915514607e-07,
"count": 1,
"self": 9.96999915514607e-07
},
"TrainerController._save_models": {
"total": 0.11516121300019222,
"count": 1,
"self": 0.00203562900014731,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11312558400004491,
"count": 1,
"self": 0.11312558400004491
}
}
}
}
}
}
}