{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.738983690738678,
"min": 0.49195733666419983,
"max": 1.3118427991867065,
"count": 16
},
"Pyramids.Policy.Entropy.sum": {
"value": 22145.86328125,
"min": 14947.6318359375,
"max": 39796.0625,
"count": 16
},
"Pyramids.Step.mean": {
"value": 479908.0,
"min": 29903.0,
"max": 479908.0,
"count": 16
},
"Pyramids.Step.sum": {
"value": 479908.0,
"min": 29903.0,
"max": 479908.0,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.03713126853108406,
"min": -0.10277976840734482,
"max": 0.03713126853108406,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 9.097160339355469,
"min": -24.872703552246094,
"max": 9.097160339355469,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.04024461656808853,
"min": 0.0300200954079628,
"max": 0.5797380805015564,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 9.859930992126465,
"min": 7.354923248291016,
"max": 137.39791870117188,
"count": 16
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06864996377992737,
"min": 0.06540100464085023,
"max": 0.07365103193059065,
"count": 16
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9610994929189831,
"min": 0.5155572235141346,
"max": 0.9988405941670127,
"count": 16
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.005410192468505057,
"min": 0.0009231757796349264,
"max": 0.013681946543527728,
"count": 16
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.0757426945590708,
"min": 0.010522280838797885,
"max": 0.0957736258046941,
"count": 16
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.1345735741928573e-05,
"min": 2.1345735741928573e-05,
"max": 0.0002904700317480857,
"count": 16
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00029884030038700003,
"min": 0.00029884030038700003,
"max": 0.0028536009487998,
"count": 16
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10711521428571429,
"min": 0.10711521428571429,
"max": 0.19682334285714287,
"count": 16
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.499613,
"min": 1.3777634,
"max": 2.3396082000000002,
"count": 16
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0007208099071428573,
"min": 0.0007208099071428573,
"max": 0.009682651951428572,
"count": 16
},
"Pyramids.Policy.Beta.sum": {
"value": 0.010091338700000002,
"min": 0.010091338700000002,
"max": 0.09515489998,
"count": 16
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.027235398069024086,
"min": 0.027235398069024086,
"max": 0.5587548017501831,
"count": 16
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.3812955617904663,
"min": 0.3812955617904663,
"max": 3.9112837314605713,
"count": 16
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 851.5833333333334,
"min": 851.4117647058823,
"max": 999.0,
"count": 16
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30657.0,
"min": 16702.0,
"max": 32277.0,
"count": 16
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.12967226654291153,
"min": -0.999962551984936,
"max": -0.12967226654291153,
"count": 16
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -4.668201595544815,
"min": -31.998801663517952,
"max": -4.668201595544815,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.12967226654291153,
"min": -0.999962551984936,
"max": -0.12967226654291153,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -4.668201595544815,
"min": -31.998801663517952,
"max": -4.668201595544815,
"count": 16
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.23885506172923165,
"min": 0.23885506172923165,
"max": 11.404472938355278,
"count": 16
},
"Pyramids.Policy.RndReward.sum": {
"value": 8.59878222225234,
"min": 8.241848946548998,
"max": 193.87603995203972,
"count": 16
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679489615",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679490564"
},
"total": 949.5795500689997,
"count": 1,
"self": 0.476823033999608,
"children": {
"run_training.setup": {
"total": 0.11458128899994335,
"count": 1,
"self": 0.11458128899994335
},
"TrainerController.start_learning": {
"total": 948.9881457460001,
"count": 1,
"self": 0.6016137709725626,
"children": {
"TrainerController._reset_env": {
"total": 7.070178240000132,
"count": 1,
"self": 7.070178240000132
},
"TrainerController.advance": {
"total": 941.2240850960275,
"count": 31620,
"self": 0.6592998469609483,
"children": {
"env_step": {
"total": 637.2980345040755,
"count": 31620,
"self": 586.0284019521023,
"children": {
"SubprocessEnvManager._take_step": {
"total": 50.89524606097439,
"count": 31620,
"self": 2.2538464990061584,
"children": {
"TorchPolicy.evaluate": {
"total": 48.64139956196823,
"count": 31317,
"self": 48.64139956196823
}
}
},
"workers": {
"total": 0.3743864909988588,
"count": 31620,
"self": 0.0,
"children": {
"worker_root": {
"total": 947.0046194860397,
"count": 31620,
"is_parallel": true,
"self": 414.42788671301423,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017473559996687982,
"count": 1,
"is_parallel": true,
"self": 0.0005700420001630846,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011773139995057136,
"count": 8,
"is_parallel": true,
"self": 0.0011773139995057136
}
}
},
"UnityEnvironment.step": {
"total": 0.04748465100010435,
"count": 1,
"is_parallel": true,
"self": 0.0005115090002618672,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004314859997975873,
"count": 1,
"is_parallel": true,
"self": 0.0004314859997975873
},
"communicator.exchange": {
"total": 0.044885274000080244,
"count": 1,
"is_parallel": true,
"self": 0.044885274000080244
},
"steps_from_proto": {
"total": 0.0016563819999646512,
"count": 1,
"is_parallel": true,
"self": 0.00037083900042489404,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012855429995397571,
"count": 8,
"is_parallel": true,
"self": 0.0012855429995397571
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 532.5767327730255,
"count": 31619,
"is_parallel": true,
"self": 15.02941522491301,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 11.005025104008382,
"count": 31619,
"is_parallel": true,
"self": 11.005025104008382
},
"communicator.exchange": {
"total": 462.5483772040575,
"count": 31619,
"is_parallel": true,
"self": 462.5483772040575
},
"steps_from_proto": {
"total": 43.99391524004659,
"count": 31619,
"is_parallel": true,
"self": 9.238356631018178,
"children": {
"_process_rank_one_or_two_observation": {
"total": 34.75555860902841,
"count": 252952,
"is_parallel": true,
"self": 34.75555860902841
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 303.266750744991,
"count": 31620,
"self": 1.1163099479704215,
"children": {
"process_trajectory": {
"total": 55.47470573201781,
"count": 31620,
"self": 55.33351731701805,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14118841499976043,
"count": 1,
"self": 0.14118841499976043
}
}
},
"_update_policy": {
"total": 246.67573506500275,
"count": 217,
"self": 156.72970990401336,
"children": {
"TorchPPOOptimizer.update": {
"total": 89.9460251609894,
"count": 11418,
"self": 89.9460251609894
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.018000148178544e-06,
"count": 1,
"self": 1.018000148178544e-06
},
"TrainerController._save_models": {
"total": 0.09226762099979169,
"count": 1,
"self": 0.0014073519996600226,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09086026900013167,
"count": 1,
"self": 0.09086026900013167
}
}
}
}
}
}
}