{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3489169776439667,
"min": 0.3489169776439667,
"max": 1.4469839334487915,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10445.1787109375,
"min": 10445.1787109375,
"max": 43895.703125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989895.0,
"min": 29952.0,
"max": 989895.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989895.0,
"min": 29952.0,
"max": 989895.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6275317668914795,
"min": -0.09911149740219116,
"max": 0.705376386642456,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 175.08135986328125,
"min": -23.984981536865234,
"max": 201.73764038085938,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.04683122783899307,
"min": -0.0729188323020935,
"max": 0.2990512251853943,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 13.065912246704102,
"min": -18.74013900756836,
"max": 72.07134246826172,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06719487608781838,
"min": 0.06481342761493966,
"max": 0.07786511315144859,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9407282652294573,
"min": 0.5450557920601401,
"max": 1.028489025155032,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013678790159506857,
"min": 0.0007324744672122727,
"max": 0.01644993048726714,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.191503062233096,
"min": 0.010254642540971817,
"max": 0.23292076070656018,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.522468921114286e-06,
"min": 7.522468921114286e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001053145648956,
"min": 0.0001053145648956,
"max": 0.0036325900891366995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250745714285714,
"min": 0.10250745714285714,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4351044,
"min": 1.3886848,
"max": 2.6108633000000006,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002604949685714285,
"min": 0.0002604949685714285,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003646929559999999,
"min": 0.003646929559999999,
"max": 0.12110524367,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01063315849751234,
"min": 0.010255193337798119,
"max": 0.4634004235267639,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14886422455310822,
"min": 0.14357270300388336,
"max": 3.243803024291992,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 298.0315789473684,
"min": 278.94392523364485,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28313.0,
"min": 15984.0,
"max": 32792.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6598589366988132,
"min": -1.0000000521540642,
"max": 1.7210560562176125,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 157.68659898638725,
"min": -30.692601673305035,
"max": 184.15299801528454,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6598589366988132,
"min": -1.0000000521540642,
"max": 1.7210560562176125,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 157.68659898638725,
"min": -30.692601673305035,
"max": 184.15299801528454,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0327953547662075,
"min": 0.030903595108288627,
"max": 9.000462010502815,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1155587027897127,
"min": 3.0776165009010583,
"max": 144.00739216804504,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678361734",
"python_version": "3.8.15 (default, Nov 24 2022, 15:19:38) \n[GCC 11.2.0]",
"command_line_arguments": "/nhome/siniac/cbernard/miniconda3/envs/hugging_face5/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.23.5",
"end_time_seconds": "1678362616"
},
"total": 881.1467503271997,
"count": 1,
"self": 0.2217969410121441,
"children": {
"run_training.setup": {
"total": 0.03436267003417015,
"count": 1,
"self": 0.03436267003417015
},
"TrainerController.start_learning": {
"total": 880.8905907161534,
"count": 1,
"self": 0.7757627982646227,
"children": {
"TrainerController._reset_env": {
"total": 2.0165106672793627,
"count": 1,
"self": 2.0165106672793627
},
"TrainerController.advance": {
"total": 877.8678974807262,
"count": 64025,
"self": 0.7822886202484369,
"children": {
"env_step": {
"total": 508.22598874755204,
"count": 64025,
"self": 445.60231372341514,
"children": {
"SubprocessEnvManager._take_step": {
"total": 62.17632272467017,
"count": 64025,
"self": 2.2666479535400867,
"children": {
"TorchPolicy.evaluate": {
"total": 59.909674771130085,
"count": 62561,
"self": 21.82704901136458,
"children": {
"TorchPolicy.sample_actions": {
"total": 38.082625759765506,
"count": 62561,
"self": 38.082625759765506
}
}
}
}
},
"workers": {
"total": 0.44735229946672916,
"count": 64025,
"self": 0.0,
"children": {
"worker_root": {
"total": 879.4531953781843,
"count": 64025,
"is_parallel": true,
"self": 483.88998732343316,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0030177850276231766,
"count": 1,
"is_parallel": true,
"self": 0.0024100355803966522,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006077494472265244,
"count": 8,
"is_parallel": true,
"self": 0.0006077494472265244
}
}
},
"UnityEnvironment.step": {
"total": 0.019572174176573753,
"count": 1,
"is_parallel": true,
"self": 0.00020107999444007874,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017595477402210236,
"count": 1,
"is_parallel": true,
"self": 0.00017595477402210236
},
"communicator.exchange": {
"total": 0.01853463239967823,
"count": 1,
"is_parallel": true,
"self": 0.01853463239967823
},
"steps_from_proto": {
"total": 0.000660507008433342,
"count": 1,
"is_parallel": true,
"self": 0.00018372386693954468,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004767831414937973,
"count": 8,
"is_parallel": true,
"self": 0.0004767831414937973
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 395.56320805475116,
"count": 64024,
"is_parallel": true,
"self": 11.851040678098798,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.51964308321476,
"count": 64024,
"is_parallel": true,
"self": 8.51964308321476
},
"communicator.exchange": {
"total": 341.5894013158977,
"count": 64024,
"is_parallel": true,
"self": 341.5894013158977
},
"steps_from_proto": {
"total": 33.6031229775399,
"count": 64024,
"is_parallel": true,
"self": 9.047859590500593,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.555263387039304,
"count": 512192,
"is_parallel": true,
"self": 24.555263387039304
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 368.85962011292577,
"count": 64025,
"self": 1.1922533381730318,
"children": {
"process_trajectory": {
"total": 81.48987087607384,
"count": 64025,
"self": 81.01461503095925,
"children": {
"RLTrainer._checkpoint": {
"total": 0.47525584511458874,
"count": 2,
"self": 0.47525584511458874
}
}
},
"_update_policy": {
"total": 286.1774958986789,
"count": 451,
"self": 83.86539529636502,
"children": {
"TorchPPOOptimizer.update": {
"total": 202.31210060231388,
"count": 22800,
"self": 202.31210060231388
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.457550287246704e-07,
"count": 1,
"self": 5.457550287246704e-07
},
"TrainerController._save_models": {
"total": 0.2304192241281271,
"count": 1,
"self": 0.014375787228345871,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21604343689978123,
"count": 1,
"self": 0.21604343689978123
}
}
}
}
}
}
}