ppo-Pyramids / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3206157088279724,
"min": 0.32021746039390564,
"max": 1.4969273805618286,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9715.9384765625,
"min": 9519.4248046875,
"max": 45410.7890625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989906.0,
"min": 29962.0,
"max": 989906.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989906.0,
"min": 29962.0,
"max": 989906.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6253999471664429,
"min": -0.10757457464933395,
"max": 0.6515991687774658,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 178.23898315429688,
"min": -25.81789779663086,
"max": 189.6153564453125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02904333733022213,
"min": -0.008796537294983864,
"max": 0.16893044114112854,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 8.277351379394531,
"min": -2.392658233642578,
"max": 40.543304443359375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06510824000739997,
"min": 0.06400309069500683,
"max": 0.07389518441098167,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9115153601035996,
"min": 0.5911614752878533,
"max": 1.0464047590594774,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01548430218598261,
"min": 0.0005122963272211974,
"max": 0.016082287501149678,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21678023060375654,
"min": 0.007172148581096763,
"max": 0.2357938977206062,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.779204549821427e-06,
"min": 7.779204549821427e-06,
"max": 0.00029485327671557496,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010890886369749998,
"min": 0.00010890886369749998,
"max": 0.0033793604735465996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10259303571428571,
"min": 0.10259303571428571,
"max": 0.19828442500000001,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4363025,
"min": 1.4363025,
"max": 2.5264534000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002690442678571428,
"min": 0.0002690442678571428,
"max": 0.0098286140575,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037666197499999993,
"min": 0.0037666197499999993,
"max": 0.11267269465999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01312094647437334,
"min": 0.01312094647437334,
"max": 0.31183338165283203,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18369324505329132,
"min": 0.18369324505329132,
"max": 2.4946670532226562,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 300.82828282828285,
"min": 296.55445544554453,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29782.0,
"min": 16377.0,
"max": 33224.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.658753518730101,
"min": -0.9999750521965325,
"max": 1.6932783340363158,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 164.21659835428,
"min": -31.99920167028904,
"max": 166.0469985306263,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.658753518730101,
"min": -0.9999750521965325,
"max": 1.6932783340363158,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 164.21659835428,
"min": -31.99920167028904,
"max": 166.0469985306263,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.040827504855935164,
"min": 0.040827504855935164,
"max": 6.552657259956879,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.041922980737581,
"min": 4.041922980737581,
"max": 111.39517341926694,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1704967791",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1704970091"
},
"total": 2299.46336148,
"count": 1,
"self": 0.4763794929995129,
"children": {
"run_training.setup": {
"total": 0.05701916699990761,
"count": 1,
"self": 0.05701916699990761
},
"TrainerController.start_learning": {
"total": 2298.9299628200006,
"count": 1,
"self": 1.4238685660575356,
"children": {
"TrainerController._reset_env": {
"total": 1.98091042499982,
"count": 1,
"self": 1.98091042499982
},
"TrainerController.advance": {
"total": 2295.4390408779436,
"count": 63913,
"self": 1.4846470530437728,
"children": {
"env_step": {
"total": 1670.509673892977,
"count": 63913,
"self": 1538.7398123749404,
"children": {
"SubprocessEnvManager._take_step": {
"total": 130.92771622001464,
"count": 63913,
"self": 4.741195245010658,
"children": {
"TorchPolicy.evaluate": {
"total": 126.18652097500399,
"count": 62558,
"self": 126.18652097500399
}
}
},
"workers": {
"total": 0.8421452980219328,
"count": 63913,
"self": 0.0,
"children": {
"worker_root": {
"total": 2293.4965142380697,
"count": 63913,
"is_parallel": true,
"self": 876.6653904871423,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017616560003261839,
"count": 1,
"is_parallel": true,
"self": 0.0005985139996482758,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001163142000677908,
"count": 8,
"is_parallel": true,
"self": 0.001163142000677908
}
}
},
"UnityEnvironment.step": {
"total": 0.05131750900000043,
"count": 1,
"is_parallel": true,
"self": 0.0006226690006769786,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045917599982203683,
"count": 1,
"is_parallel": true,
"self": 0.00045917599982203683
},
"communicator.exchange": {
"total": 0.04847028999984104,
"count": 1,
"is_parallel": true,
"self": 0.04847028999984104
},
"steps_from_proto": {
"total": 0.001765373999660369,
"count": 1,
"is_parallel": true,
"self": 0.0003526400005284813,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014127339991318877,
"count": 8,
"is_parallel": true,
"self": 0.0014127339991318877
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1416.8311237509274,
"count": 63912,
"is_parallel": true,
"self": 34.985161095874446,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.144624263142305,
"count": 63912,
"is_parallel": true,
"self": 25.144624263142305
},
"communicator.exchange": {
"total": 1254.666200946947,
"count": 63912,
"is_parallel": true,
"self": 1254.666200946947
},
"steps_from_proto": {
"total": 102.03513744496377,
"count": 63912,
"is_parallel": true,
"self": 20.626056165771843,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.40908127919192,
"count": 511296,
"is_parallel": true,
"self": 81.40908127919192
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 623.4447199319229,
"count": 63913,
"self": 2.7014012849940627,
"children": {
"process_trajectory": {
"total": 129.11494960193113,
"count": 63913,
"self": 128.92141814193155,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19353145999957633,
"count": 2,
"self": 0.19353145999957633
}
}
},
"_update_policy": {
"total": 491.6283690449977,
"count": 450,
"self": 290.99894024301284,
"children": {
"TorchPPOOptimizer.update": {
"total": 200.6294288019849,
"count": 22749,
"self": 200.6294288019849
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2359996617306024e-06,
"count": 1,
"self": 1.2359996617306024e-06
},
"TrainerController._save_models": {
"total": 0.08614171499993972,
"count": 1,
"self": 0.0014308230001915945,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08471089199974813,
"count": 1,
"self": 0.08471089199974813
}
}
}
}
}
}
}
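
The JSON above follows the ML-Agents timer format: a "gauges" map of per-metric summaries (final value, min, max, count), a "metadata" block describing the run, and a nested wall-clock timer tree keyed by "total"/"count"/"self"/"children". Below is a minimal sketch (not part of the original run artifacts) of loading and inspecting the file with the Python standard library; the relative path "run_logs/timers.json" is an assumption based on the repository layout shown in the title line.

import json

# Load the timer report written by mlagents-learn at the end of training.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Print the recorded summary for each gauge (e.g. Pyramids.Environment.CumulativeReward.mean).
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']} min={gauge['min']} max={gauge['max']} count={gauge['count']}")

# Wall-clock breakdown: total seconds in the root timer and its immediate children.
print("total seconds:", timers["total"])
for child, node in timers["children"].items():
    print(f"  {child}: {node['total']:.2f}s over {node['count']} call(s)")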