Pyramids / run_logs /timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.37221190333366394,
"min": 0.37221190333366394,
"max": 1.4548053741455078,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11208.044921875,
"min": 11208.044921875,
"max": 44132.9765625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989996.0,
"min": 29952.0,
"max": 989996.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989996.0,
"min": 29952.0,
"max": 989996.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5196493268013,
"min": -0.146745502948761,
"max": 0.5714281797409058,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 142.903564453125,
"min": -35.365665435791016,
"max": 158.2855987548828,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.026558618992567062,
"min": 0.006787791382521391,
"max": 0.3601658344268799,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.303620338439941,
"min": 1.8530670404434204,
"max": 87.1601333618164,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06905311138516047,
"min": 0.0656891417304342,
"max": 0.07295907923856053,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9667435593922467,
"min": 0.48912309593656206,
"max": 1.0518215575333065,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014608238636615816,
"min": 0.0011141735972950099,
"max": 0.016981143901440586,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20451534091262144,
"min": 0.014484256764835128,
"max": 0.2377360146201682,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.569290334078571e-06,
"min": 7.569290334078571e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001059700646771,
"min": 0.0001059700646771,
"max": 0.0036330253889915996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1025230642857143,
"min": 0.1025230642857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4353229,
"min": 1.3886848,
"max": 2.6110084000000002,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026205412214285714,
"min": 0.00026205412214285714,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00366875771,
"min": 0.00366875771,
"max": 0.12111973916,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01264817826449871,
"min": 0.01264817826449871,
"max": 0.5465950965881348,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17707449197769165,
"min": 0.17707449197769165,
"max": 3.8261656761169434,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 383.02564102564105,
"min": 324.0659340659341,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29876.0,
"min": 15984.0,
"max": 33005.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5656794714622009,
"min": -1.0000000521540642,
"max": 1.6319670174475556,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 122.12299877405167,
"min": -28.322001732885838,
"max": 148.50899858772755,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5656794714622009,
"min": -1.0000000521540642,
"max": 1.6319670174475556,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 122.12299877405167,
"min": -28.322001732885838,
"max": 148.50899858772755,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05026566562278626,
"min": 0.04426909563841511,
"max": 10.49105979129672,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.9207219185773283,
"min": 3.9207219185773283,
"max": 167.85695666074753,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1719742684",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1719744910"
},
"total": 2226.379663939001,
"count": 1,
"self": 0.848578896001527,
"children": {
"run_training.setup": {
"total": 0.05253346900008182,
"count": 1,
"self": 0.05253346900008182
},
"TrainerController.start_learning": {
"total": 2225.4785515739995,
"count": 1,
"self": 1.3184406959844637,
"children": {
"TrainerController._reset_env": {
"total": 2.4576229060003243,
"count": 1,
"self": 2.4576229060003243
},
"TrainerController.advance": {
"total": 2221.5818873290145,
"count": 63865,
"self": 1.3760955712150462,
"children": {
"env_step": {
"total": 1578.4086364178274,
"count": 63865,
"self": 1446.0713860370797,
"children": {
"SubprocessEnvManager._take_step": {
"total": 131.5442363958573,
"count": 63865,
"self": 4.583290846821001,
"children": {
"TorchPolicy.evaluate": {
"total": 126.9609455490363,
"count": 62564,
"self": 126.9609455490363
}
}
},
"workers": {
"total": 0.7930139848904219,
"count": 63865,
"self": 0.0,
"children": {
"worker_root": {
"total": 2220.1792534261585,
"count": 63865,
"is_parallel": true,
"self": 894.364544693176,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021413410004242905,
"count": 1,
"is_parallel": true,
"self": 0.000634246003755834,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015070949966684566,
"count": 8,
"is_parallel": true,
"self": 0.0015070949966684566
}
}
},
"UnityEnvironment.step": {
"total": 0.07230310599970835,
"count": 1,
"is_parallel": true,
"self": 0.000616320999142772,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000458636000075785,
"count": 1,
"is_parallel": true,
"self": 0.000458636000075785
},
"communicator.exchange": {
"total": 0.06975871000031475,
"count": 1,
"is_parallel": true,
"self": 0.06975871000031475
},
"steps_from_proto": {
"total": 0.0014694390001750435,
"count": 1,
"is_parallel": true,
"self": 0.000300138000966399,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011693009992086445,
"count": 8,
"is_parallel": true,
"self": 0.0011693009992086445
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1325.8147087329826,
"count": 63864,
"is_parallel": true,
"self": 33.70914165596605,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.75627686795451,
"count": 63864,
"is_parallel": true,
"self": 22.75627686795451
},
"communicator.exchange": {
"total": 1172.4692242190895,
"count": 63864,
"is_parallel": true,
"self": 1172.4692242190895
},
"steps_from_proto": {
"total": 96.88006598997254,
"count": 63864,
"is_parallel": true,
"self": 19.317813873937666,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.56225211603487,
"count": 510912,
"is_parallel": true,
"self": 77.56225211603487
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 641.797155339972,
"count": 63865,
"self": 2.5684806950812344,
"children": {
"process_trajectory": {
"total": 127.1017614658931,
"count": 63865,
"self": 126.8772784618941,
"children": {
"RLTrainer._checkpoint": {
"total": 0.224483003999012,
"count": 2,
"self": 0.224483003999012
}
}
},
"_update_policy": {
"total": 512.1269131789977,
"count": 453,
"self": 304.8613184088772,
"children": {
"TorchPPOOptimizer.update": {
"total": 207.26559477012052,
"count": 22809,
"self": 207.26559477012052
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1330012057442218e-06,
"count": 1,
"self": 1.1330012057442218e-06
},
"TrainerController._save_models": {
"total": 0.12059950999901048,
"count": 1,
"self": 0.0019264239981566789,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1186730860008538,
"count": 1,
"self": 0.1186730860008538
}
}
}
}
}
}
}