PyramidsRND / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.422071635723114,
"min": 0.422071635723114,
"max": 1.5132286548614502,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12587.8642578125,
"min": 12587.8642578125,
"max": 45905.3046875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989886.0,
"min": 29952.0,
"max": 989886.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989886.0,
"min": 29952.0,
"max": 989886.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6275933384895325,
"min": -0.09996095299720764,
"max": 0.6682064533233643,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 180.74688720703125,
"min": -24.09058952331543,
"max": 190.4388427734375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.008181886747479439,
"min": -0.04465390741825104,
"max": 0.32788509130477905,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -2.3563833236694336,
"min": -12.592401504516602,
"max": 78.69242095947266,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06464568574253159,
"min": 0.06464568574253159,
"max": 0.07422114249187609,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9696852861379738,
"min": 0.5028729667134741,
"max": 1.0871423517431444,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01517917571731636,
"min": 0.0005626130253311197,
"max": 0.01517917571731636,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2276876357597454,
"min": 0.006751356303973436,
"max": 0.2276876357597454,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.479157506979997e-06,
"min": 7.479157506979997e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011218736260469996,
"min": 0.00011218736260469996,
"max": 0.0035069249310250997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249301999999999,
"min": 0.10249301999999999,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5373952999999998,
"min": 1.3691136000000002,
"max": 2.5689748999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000259052698,
"min": 0.000259052698,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038857904699999995,
"min": 0.0038857904699999995,
"max": 0.11692059251,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007248158100992441,
"min": 0.007248158100992441,
"max": 0.3603551983833313,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10872237384319305,
"min": 0.10486799478530884,
"max": 2.522486448287964,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 309.29,
"min": 287.40384615384613,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30929.0,
"min": 15984.0,
"max": 33098.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6106799863278867,
"min": -1.0000000521540642,
"max": 1.6984148772155985,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 161.06799863278866,
"min": -32.000001668930054,
"max": 174.40799809247255,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6106799863278867,
"min": -1.0000000521540642,
"max": 1.6984148772155985,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 161.06799863278866,
"min": -32.000001668930054,
"max": 174.40799809247255,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02356862095708493,
"min": 0.02356862095708493,
"max": 7.722775853704661,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.356862095708493,
"min": 2.1633324651484145,
"max": 123.56441365927458,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682419552",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682422415"
},
"total": 2862.6273221170004,
"count": 1,
"self": 0.8926431510008115,
"children": {
"run_training.setup": {
"total": 0.12418018100015615,
"count": 1,
"self": 0.12418018100015615
},
"TrainerController.start_learning": {
"total": 2861.6104987849994,
"count": 1,
"self": 2.161786741834476,
"children": {
"TrainerController._reset_env": {
"total": 5.591077189999851,
"count": 1,
"self": 5.591077189999851
},
"TrainerController.advance": {
"total": 2853.6907759621654,
"count": 64031,
"self": 2.2535104044218315,
"children": {
"env_step": {
"total": 2113.630535597904,
"count": 64031,
"self": 1960.4012911889122,
"children": {
"SubprocessEnvManager._take_step": {
"total": 151.86478463800358,
"count": 64031,
"self": 6.4070491099819264,
"children": {
"TorchPolicy.evaluate": {
"total": 145.45773552802166,
"count": 62565,
"self": 145.45773552802166
}
}
},
"workers": {
"total": 1.3644597709881054,
"count": 64031,
"self": 0.0,
"children": {
"worker_root": {
"total": 2853.61988177036,
"count": 64031,
"is_parallel": true,
"self": 1050.888898607217,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001968698000382574,
"count": 1,
"is_parallel": true,
"self": 0.0005687170014425647,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013999809989400092,
"count": 8,
"is_parallel": true,
"self": 0.0013999809989400092
}
}
},
"UnityEnvironment.step": {
"total": 0.10138852399995812,
"count": 1,
"is_parallel": true,
"self": 0.000603378000050725,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048393999986728886,
"count": 1,
"is_parallel": true,
"self": 0.00048393999986728886
},
"communicator.exchange": {
"total": 0.09841282999968826,
"count": 1,
"is_parallel": true,
"self": 0.09841282999968826
},
"steps_from_proto": {
"total": 0.0018883760003518546,
"count": 1,
"is_parallel": true,
"self": 0.0003923249996660161,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014960510006858385,
"count": 8,
"is_parallel": true,
"self": 0.0014960510006858385
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1802.7309831631428,
"count": 64030,
"is_parallel": true,
"self": 41.97936773039146,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 28.069950950238308,
"count": 64030,
"is_parallel": true,
"self": 28.069950950238308
},
"communicator.exchange": {
"total": 1613.5301478057754,
"count": 64030,
"is_parallel": true,
"self": 1613.5301478057754
},
"steps_from_proto": {
"total": 119.15151667673763,
"count": 64030,
"is_parallel": true,
"self": 26.81893368677447,
"children": {
"_process_rank_one_or_two_observation": {
"total": 92.33258298996316,
"count": 512240,
"is_parallel": true,
"self": 92.33258298996316
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 737.8067299598397,
"count": 64031,
"self": 4.196896834826475,
"children": {
"process_trajectory": {
"total": 128.31894823801122,
"count": 64031,
"self": 127.97126235200994,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3476858860012726,
"count": 2,
"self": 0.3476858860012726
}
}
},
"_update_policy": {
"total": 605.290884887002,
"count": 446,
"self": 384.8675363158454,
"children": {
"TorchPPOOptimizer.update": {
"total": 220.42334857115657,
"count": 22824,
"self": 220.42334857115657
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3640001270687208e-06,
"count": 1,
"self": 1.3640001270687208e-06
},
"TrainerController._save_models": {
"total": 0.16685752699959266,
"count": 1,
"self": 0.005556623000302352,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1613009039992903,
"count": 1,
"self": 0.1613009039992903
}
}
}
}
}
}
}
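
The JSON above is self-contained: "gauges" holds per-metric summaries (latest value, min, max, count), and the timer tree nests via "children" nodes that each carry "total", "count", and "self" seconds. A minimal sketch of inspecting it, assuming the file has been saved locally as "timers.json" (the path and the helper function walk are illustrative, not part of the ML-Agents API):

import json

# Minimal sketch: load the timers file produced by an ML-Agents run.
# Assumes the JSON shown above was saved locally as "timers.json".
with open("timers.json") as f:
    timers = json.load(f)

# Each gauge records the most recent value plus min/max/count over the run.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# Timer nodes nest via "children"; each has "total", "count", and "self" seconds.
def walk(node, label="root", depth=0):
    print("  " * depth + f"{label}: {node['total']:.2f} s over {node['count']} call(s)")
    for child_label, child in node.get("children", {}).items():
        walk(child, child_label, depth + 1)

walk(timers)

Running the sketch against this file would list the 27 gauges and then print the wall-clock breakdown, e.g. that most of the ~2862 s run is spent inside TrainerController.advance, dominated by env_step (communicator.exchange) and _update_policy.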