{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4877895414829254,
"min": 0.4877895414829254,
"max": 1.4276467561721802,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14750.755859375,
"min": 14750.755859375,
"max": 43309.09375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989971.0,
"min": 29985.0,
"max": 989971.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989971.0,
"min": 29985.0,
"max": 989971.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3313111960887909,
"min": -0.10459650307893753,
"max": 0.461904913187027,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 87.79747009277344,
"min": -25.103160858154297,
"max": 123.79051971435547,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.6019312739372253,
"min": -0.61241215467453,
"max": 0.6019312739372253,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 159.51177978515625,
"min": -160.4519805908203,
"max": 159.51177978515625,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0682120603700501,
"min": 0.06434755042343766,
"max": 0.07310072194824259,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9549688451807016,
"min": 0.5117050536376981,
"max": 1.042244601577598,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.06812883828977301,
"min": 0.00031434256763016787,
"max": 0.06812883828977301,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.9538037360568221,
"min": 0.0037721108115620146,
"max": 0.9538037360568221,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.666061730392857e-06,
"min": 7.666061730392857e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010732486422549999,
"min": 0.00010732486422549999,
"max": 0.0035072270309243997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10255532142857145,
"min": 0.10255532142857145,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4357745000000002,
"min": 1.3886848,
"max": 2.5690756,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026527661071428573,
"min": 0.00026527661071428573,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037138725500000004,
"min": 0.0037138725500000004,
"max": 0.11693065244000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.006950224284082651,
"min": 0.006950224284082651,
"max": 0.35153621435165405,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.09730313718318939,
"min": 0.09730313718318939,
"max": 2.4607534408569336,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 508.672131147541,
"min": 412.87142857142857,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31029.0,
"min": 16784.0,
"max": 33024.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.2617409566386801,
"min": -0.9999000518582761,
"max": 1.5585542600069726,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 76.96619835495949,
"min": -31.996801659464836,
"max": 109.09879820048809,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.2617409566386801,
"min": -0.9999000518582761,
"max": 1.5585542600069726,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 76.96619835495949,
"min": -31.996801659464836,
"max": 109.09879820048809,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03641195466945169,
"min": 0.03412701004395226,
"max": 6.754271524355692,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.2211292348365532,
"min": 2.2182556528568966,
"max": 114.82261591404676,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1657309006",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1657311122"
},
"total": 2116.146267751,
"count": 1,
"self": 0.48194089599974177,
"children": {
"run_training.setup": {
"total": 0.043788979999931144,
"count": 1,
"self": 0.043788979999931144
},
"TrainerController.start_learning": {
"total": 2115.620537875,
"count": 1,
"self": 1.5764783300428462,
"children": {
"TrainerController._reset_env": {
"total": 10.003945166999983,
"count": 1,
"self": 10.003945166999983
},
"TrainerController.advance": {
"total": 2103.946173620957,
"count": 63591,
"self": 1.5861168209839889,
"children": {
"env_step": {
"total": 1338.7314306589508,
"count": 63591,
"self": 1226.159380111992,
"children": {
"SubprocessEnvManager._take_step": {
"total": 111.75133710099931,
"count": 63591,
"self": 4.817163276003271,
"children": {
"TorchPolicy.evaluate": {
"total": 106.93417382499604,
"count": 62549,
"self": 36.33038118001116,
"children": {
"TorchPolicy.sample_actions": {
"total": 70.60379264498488,
"count": 62549,
"self": 70.60379264498488
}
}
}
}
},
"workers": {
"total": 0.8207134459595409,
"count": 63591,
"self": 0.0,
"children": {
"worker_root": {
"total": 2111.54045398497,
"count": 63591,
"is_parallel": true,
"self": 990.234894074963,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005513232999987849,
"count": 1,
"is_parallel": true,
"self": 0.00412801499987836,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013852180001094894,
"count": 8,
"is_parallel": true,
"self": 0.0013852180001094894
}
}
},
"UnityEnvironment.step": {
"total": 0.05224870300003204,
"count": 1,
"is_parallel": true,
"self": 0.0005017249999355045,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00028810300000259303,
"count": 1,
"is_parallel": true,
"self": 0.00028810300000259303
},
"communicator.exchange": {
"total": 0.049762918000055834,
"count": 1,
"is_parallel": true,
"self": 0.049762918000055834
},
"steps_from_proto": {
"total": 0.0016959570000381063,
"count": 1,
"is_parallel": true,
"self": 0.00045111800022823445,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001244838999809872,
"count": 8,
"is_parallel": true,
"self": 0.001244838999809872
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1121.305559910007,
"count": 63590,
"is_parallel": true,
"self": 29.06304668602661,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.354639281003642,
"count": 63590,
"is_parallel": true,
"self": 24.354639281003642
},
"communicator.exchange": {
"total": 970.7875338350049,
"count": 63590,
"is_parallel": true,
"self": 970.7875338350049
},
"steps_from_proto": {
"total": 97.10034010797199,
"count": 63590,
"is_parallel": true,
"self": 24.395352890857794,
"children": {
"_process_rank_one_or_two_observation": {
"total": 72.7049872171142,
"count": 508720,
"is_parallel": true,
"self": 72.7049872171142
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 763.628626141022,
"count": 63591,
"self": 2.800404878980885,
"children": {
"process_trajectory": {
"total": 174.346729461034,
"count": 63591,
"self": 174.13018816303418,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21654129799981092,
"count": 2,
"self": 0.21654129799981092
}
}
},
"_update_policy": {
"total": 586.4814918010071,
"count": 451,
"self": 231.45633428899566,
"children": {
"TorchPPOOptimizer.update": {
"total": 355.02515751201145,
"count": 22788,
"self": 355.02515751201145
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.193000116472831e-06,
"count": 1,
"self": 1.193000116472831e-06
},
"TrainerController._save_models": {
"total": 0.09393956400026582,
"count": 1,
"self": 0.0016233070005000627,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09231625699976576,
"count": 1,
"self": 0.09231625699976576
}
}
}
}
}
}
}