{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.8386691808700562,
"min": 0.672394335269928,
"max": 1.6090904474258423,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 2227.50537109375,
"min": 1567.004638671875,
"max": 6590.83447265625,
"count": 100
},
"Pyramids.Step.mean": {
"value": 299985.0,
"min": 2944.0,
"max": 299985.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 299985.0,
"min": 2944.0,
"max": 299985.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0198122076690197,
"min": -0.15639443695545197,
"max": 0.04895129054784775,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.4953051805496216,
"min": -3.597071886062622,
"max": 1.2237823009490967,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.048741064965724945,
"min": -0.159381702542305,
"max": 0.3734317719936371,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.2185266017913818,
"min": -3.6657791137695312,
"max": 9.531417846679688,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06939222758713488,
"min": 0.05129323783330619,
"max": 0.08596819670250018,
"count": 93
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.06939222758713488,
"min": 0.05129323783330619,
"max": 0.15416727164833277,
"count": 93
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.007183108794076058,
"min": 0.0001334021358161408,
"max": 0.02406926324086574,
"count": 93
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.007183108794076058,
"min": 0.0001334021358161408,
"max": 0.02803544798977479,
"count": 93
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5270994910000118e-06,
"min": 1.5270994910000118e-06,
"max": 0.0002959040013653333,
"count": 93
},
"Pyramids.Policy.LearningRate.sum": {
"value": 1.5270994910000118e-06,
"min": 1.5270994910000118e-06,
"max": 0.0004536380487873333,
"count": 93
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.100509,
"min": 0.100509,
"max": 0.1986346666666667,
"count": 93
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.100509,
"min": 0.100509,
"max": 0.3512126666666667,
"count": 93
},
"Pyramids.Policy.Beta.mean": {
"value": 6.084910000000039e-05,
"min": 6.084910000000039e-05,
"max": 0.009863603200000001,
"count": 93
},
"Pyramids.Policy.Beta.sum": {
"value": 6.084910000000039e-05,
"min": 6.084910000000039e-05,
"max": 0.015126145399999999,
"count": 93
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.044944848865270615,
"min": 0.0422811433672905,
"max": 1.0479909181594849,
"count": 93
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.044944848865270615,
"min": 0.04323822259902954,
"max": 1.0479909181594849,
"count": 93
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 933.3333333333334,
"min": 283.0,
"max": 999.0,
"count": 64
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 5600.0,
"min": 283.0,
"max": 15984.0,
"count": 64
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.5207600563764572,
"min": -1.0000000521540642,
"max": 1.717000037431717,
"count": 68
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -2.603800281882286,
"min": -16.000000834465027,
"max": 3.8583998680114746,
"count": 68
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.5207600563764572,
"min": -1.0000000521540642,
"max": 1.717000037431717,
"count": 68
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -2.603800281882286,
"min": -16.000000834465027,
"max": 3.8583998680114746,
"count": 68
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.4332942306995392,
"min": 0.2255028709769249,
"max": 9.63311800956726,
"count": 68
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.166471153497696,
"min": 0.2255028709769249,
"max": 96.33118009567261,
"count": 68
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676054733",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676055353"
},
"total": 620.802438787,
"count": 1,
"self": 0.732544941000242,
"children": {
"run_training.setup": {
"total": 0.1652681749999374,
"count": 1,
"self": 0.1652681749999374
},
"TrainerController.start_learning": {
"total": 619.9046256709998,
"count": 1,
"self": 0.3725324809797712,
"children": {
"TrainerController._reset_env": {
"total": 7.0516776140000275,
"count": 1,
"self": 7.0516776140000275
},
"TrainerController.advance": {
"total": 612.3837014210199,
"count": 18921,
"self": 0.38513648003413437,
"children": {
"env_step": {
"total": 394.9876312099814,
"count": 18921,
"self": 361.6783296569463,
"children": {
"SubprocessEnvManager._take_step": {
"total": 33.08237924302102,
"count": 18921,
"self": 1.3507869650156863,
"children": {
"TorchPolicy.evaluate": {
"total": 31.731592278005337,
"count": 18796,
"self": 10.677259100004221,
"children": {
"TorchPolicy.sample_actions": {
"total": 21.054333178001116,
"count": 18796,
"self": 21.054333178001116
}
}
}
}
},
"workers": {
"total": 0.2269223100140607,
"count": 18921,
"self": 0.0,
"children": {
"worker_root": {
"total": 618.5263484090012,
"count": 18921,
"is_parallel": true,
"self": 289.65094372599015,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025638600000092993,
"count": 1,
"is_parallel": true,
"self": 0.0008832359999360051,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016806240000732942,
"count": 8,
"is_parallel": true,
"self": 0.0016806240000732942
}
}
},
"UnityEnvironment.step": {
"total": 0.07237013600001774,
"count": 1,
"is_parallel": true,
"self": 0.0005669140002737549,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004382759998406982,
"count": 1,
"is_parallel": true,
"self": 0.0004382759998406982
},
"communicator.exchange": {
"total": 0.06910408499993537,
"count": 1,
"is_parallel": true,
"self": 0.06910408499993537
},
"steps_from_proto": {
"total": 0.0022608609999679175,
"count": 1,
"is_parallel": true,
"self": 0.0009180509998714115,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001342810000096506,
"count": 8,
"is_parallel": true,
"self": 0.001342810000096506
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 328.8754046830111,
"count": 18920,
"is_parallel": true,
"self": 9.010022927006958,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.504192668020551,
"count": 18920,
"is_parallel": true,
"self": 6.504192668020551
},
"communicator.exchange": {
"total": 284.08910702098365,
"count": 18920,
"is_parallel": true,
"self": 284.08910702098365
},
"steps_from_proto": {
"total": 29.27208206699993,
"count": 18920,
"is_parallel": true,
"self": 6.311371723993716,
"children": {
"_process_rank_one_or_two_observation": {
"total": 22.960710343006212,
"count": 151360,
"is_parallel": true,
"self": 22.960710343006212
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 217.01093373100434,
"count": 18921,
"self": 0.5888219080031831,
"children": {
"process_trajectory": {
"total": 47.47117167900251,
"count": 18921,
"self": 47.47117167900251
},
"_update_policy": {
"total": 168.95094014399865,
"count": 121,
"self": 64.30335902100114,
"children": {
"TorchPPOOptimizer.update": {
"total": 104.64758112299751,
"count": 6873,
"self": 104.64758112299751
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.910001270123757e-07,
"count": 1,
"self": 8.910001270123757e-07
},
"TrainerController._save_models": {
"total": 0.09671326400007274,
"count": 1,
"self": 0.0013535360001242225,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09535972799994852,
"count": 1,
"self": 0.09535972799994852
}
}
}
}
}
}
}