{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.41841045022010803,
"min": 0.41841045022010803,
"max": 1.4391876459121704,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12505.451171875,
"min": 12505.451171875,
"max": 43659.1953125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989914.0,
"min": 29952.0,
"max": 989914.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989914.0,
"min": 29952.0,
"max": 989914.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3485969007015228,
"min": -0.10552112013101578,
"max": 0.3562856614589691,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 91.33238983154297,
"min": -25.325069427490234,
"max": 92.99055480957031,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.17564959824085236,
"min": -0.07352815568447113,
"max": 0.22996686398983002,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 46.02019500732422,
"min": -19.337905883789062,
"max": 60.0213508605957,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06749834354899922,
"min": 0.06523323707404502,
"max": 0.07282965667606967,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0124751532349883,
"min": 0.5098075967324877,
"max": 1.0303891778360896,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01347067835820638,
"min": 8.857589274066465e-05,
"max": 0.01943193745030761,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2020601753730957,
"min": 0.0011514866056286405,
"max": 0.27204712430430655,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.57287747574e-06,
"min": 7.57287747574e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011359316213609999,
"min": 0.00011359316213609999,
"max": 0.0033818594727135997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252426,
"min": 0.10252426,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5378639,
"min": 1.3886848,
"max": 2.5272864000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000262173574,
"min": 0.000262173574,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00393260361,
"min": 0.00393260361,
"max": 0.11275591136,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008787771686911583,
"min": 0.008787771686911583,
"max": 0.3785453140735626,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1318165808916092,
"min": 0.12414293736219406,
"max": 2.6498172283172607,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 516.2666666666667,
"min": 480.271186440678,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30976.0,
"min": 15984.0,
"max": 33111.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.259199970203345,
"min": -1.0000000521540642,
"max": 1.3841016653735758,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 76.81119818240404,
"min": -31.998801663517952,
"max": 81.66199825704098,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.259199970203345,
"min": -1.0000000521540642,
"max": 1.3841016653735758,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 76.81119818240404,
"min": -31.998801663517952,
"max": 81.66199825704098,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0460248265070001,
"min": 0.0460248265070001,
"max": 8.053747095167637,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.807514416927006,
"min": 2.67294980227598,
"max": 128.8599535226822,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1709534429",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids_Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1709536490"
},
"total": 2061.1927714169997,
"count": 1,
"self": 0.5289975879991289,
"children": {
"run_training.setup": {
"total": 0.07425835200001529,
"count": 1,
"self": 0.07425835200001529
},
"TrainerController.start_learning": {
"total": 2060.5895154770005,
"count": 1,
"self": 1.2601521869837597,
"children": {
"TrainerController._reset_env": {
"total": 2.4570604100001674,
"count": 1,
"self": 2.4570604100001674
},
"TrainerController.advance": {
"total": 2056.782510261017,
"count": 63397,
"self": 1.4536977440629926,
"children": {
"env_step": {
"total": 1434.6834951690203,
"count": 63397,
"self": 1308.7354553850116,
"children": {
"SubprocessEnvManager._take_step": {
"total": 125.13575199401134,
"count": 63397,
"self": 4.466975661128117,
"children": {
"TorchPolicy.evaluate": {
"total": 120.66877633288323,
"count": 62551,
"self": 120.66877633288323
}
}
},
"workers": {
"total": 0.8122877899972991,
"count": 63397,
"self": 0.0,
"children": {
"worker_root": {
"total": 2055.7899448640005,
"count": 63397,
"is_parallel": true,
"self": 857.6178374160486,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002104122999980973,
"count": 1,
"is_parallel": true,
"self": 0.0006368169999859674,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014673059999950055,
"count": 8,
"is_parallel": true,
"self": 0.0014673059999950055
}
}
},
"UnityEnvironment.step": {
"total": 0.052662772000076075,
"count": 1,
"is_parallel": true,
"self": 0.0006297029999586812,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048228300011032843,
"count": 1,
"is_parallel": true,
"self": 0.00048228300011032843
},
"communicator.exchange": {
"total": 0.04982094499996492,
"count": 1,
"is_parallel": true,
"self": 0.04982094499996492
},
"steps_from_proto": {
"total": 0.0017298410000421427,
"count": 1,
"is_parallel": true,
"self": 0.000386235000405577,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013436059996365657,
"count": 8,
"is_parallel": true,
"self": 0.0013436059996365657
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1198.172107447952,
"count": 63396,
"is_parallel": true,
"self": 34.5177084190118,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.29398646696518,
"count": 63396,
"is_parallel": true,
"self": 24.29398646696518
},
"communicator.exchange": {
"total": 1042.8833484409665,
"count": 63396,
"is_parallel": true,
"self": 1042.8833484409665
},
"steps_from_proto": {
"total": 96.47706412100842,
"count": 63396,
"is_parallel": true,
"self": 19.035663608053937,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.44140051295449,
"count": 507168,
"is_parallel": true,
"self": 77.44140051295449
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 620.6453173479333,
"count": 63397,
"self": 2.4407180488881295,
"children": {
"process_trajectory": {
"total": 121.37889651805153,
"count": 63397,
"self": 121.18412312905139,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1947733890001473,
"count": 2,
"self": 0.1947733890001473
}
}
},
"_update_policy": {
"total": 496.82570278099365,
"count": 448,
"self": 292.41547293694475,
"children": {
"TorchPPOOptimizer.update": {
"total": 204.4102298440489,
"count": 22830,
"self": 204.4102298440489
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.920001164369751e-07,
"count": 1,
"self": 8.920001164369751e-07
},
"TrainerController._save_models": {
"total": 0.08979172699991977,
"count": 1,
"self": 0.0016266019997601688,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0881651250001596,
"count": 1,
"self": 0.0881651250001596
}
}
}
}
}
}
}