{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.704181432723999,
"min": 0.704181432723999,
"max": 1.4420181512832642,
"count": 10
},
"Pyramids.Policy.Entropy.sum": {
"value": 21091.642578125,
"min": 21091.642578125,
"max": 43745.0625,
"count": 10
},
"Pyramids.Step.mean": {
"value": 299998.0,
"min": 29939.0,
"max": 299998.0,
"count": 10
},
"Pyramids.Step.sum": {
"value": 299998.0,
"min": 29939.0,
"max": 299998.0,
"count": 10
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.06615421921014786,
"min": -0.21818986535072327,
"max": -0.03297995403409004,
"count": 10
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -15.943167686462402,
"min": -51.71099853515625,
"max": -7.915188789367676,
"count": 10
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03181059658527374,
"min": 0.03181059658527374,
"max": 0.4717663824558258,
"count": 10
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.666354179382324,
"min": 7.666354179382324,
"max": 111.80863189697266,
"count": 10
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06608748776559592,
"min": 0.0650946730015156,
"max": 0.07354978325885465,
"count": 10
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9252248287183429,
"min": 0.5059748568045893,
"max": 0.9561471823651104,
"count": 10
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0015892796493671452,
"min": 0.000761867436767018,
"max": 0.006258429338666095,
"count": 10
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.02224991509114003,
"min": 0.010666144114738253,
"max": 0.04490969698789862,
"count": 10
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.55198091124762e-05,
"min": 1.55198091124762e-05,
"max": 0.00028411157672471424,
"count": 10
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0002172773275746668,
"min": 0.0002172773275746668,
"max": 0.002819791160069666,
"count": 10
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10517323809523811,
"min": 0.10517323809523811,
"max": 0.1947038571428572,
"count": 10
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4724253333333335,
"min": 1.3629270000000004,
"max": 2.1693686666666667,
"count": 10
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0005268064857142861,
"min": 0.0005268064857142861,
"max": 0.009470915328571429,
"count": 10
},
"Pyramids.Policy.Beta.sum": {
"value": 0.007375290800000005,
"min": 0.007375290800000005,
"max": 0.09400904030000001,
"count": 10
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.02906503714621067,
"min": 0.02906503714621067,
"max": 0.3680463135242462,
"count": 10
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.4069105088710785,
"min": 0.4069105088710785,
"max": 2.576324224472046,
"count": 10
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 958.34375,
"min": 899.7777777777778,
"max": 990.7666666666667,
"count": 10
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30667.0,
"min": 16738.0,
"max": 32537.0,
"count": 10
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.8340750511270016,
"min": -0.8678824016276527,
"max": -0.5300222679420754,
"count": 10
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -26.690401636064053,
"min": -28.291401736438274,
"max": -14.310601234436035,
"count": 10
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.8340750511270016,
"min": -0.8678824016276527,
"max": -0.5300222679420754,
"count": 10
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -26.690401636064053,
"min": -28.291401736438274,
"max": -14.310601234436035,
"count": 10
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.28844117958215065,
"min": 0.28844117958215065,
"max": 6.61330724726705,
"count": 10
},
"Pyramids.Policy.RndReward.sum": {
"value": 9.230117746628821,
"min": 8.43572437018156,
"max": 112.42622320353985,
"count": 10
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679115200",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679115796"
},
"total": 595.7575347509999,
"count": 1,
"self": 0.4817856459998211,
"children": {
"run_training.setup": {
"total": 0.11076482900000428,
"count": 1,
"self": 0.11076482900000428
},
"TrainerController.start_learning": {
"total": 595.164984276,
"count": 1,
"self": 0.40402751101191825,
"children": {
"TrainerController._reset_env": {
"total": 7.237422566999953,
"count": 1,
"self": 7.237422566999953
},
"TrainerController.advance": {
"total": 587.2881953889881,
"count": 18900,
"self": 0.44381263899731493,
"children": {
"env_step": {
"total": 399.3241795319849,
"count": 18900,
"self": 365.52395515697765,
"children": {
"SubprocessEnvManager._take_step": {
"total": 33.546439834997614,
"count": 18900,
"self": 1.4827796249987841,
"children": {
"TorchPolicy.evaluate": {
"total": 32.06366020999883,
"count": 18795,
"self": 32.06366020999883
}
}
},
"workers": {
"total": 0.253784540009633,
"count": 18900,
"self": 0.0,
"children": {
"worker_root": {
"total": 593.6004999339997,
"count": 18900,
"is_parallel": true,
"self": 264.1872149439914,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002024562000087826,
"count": 1,
"is_parallel": true,
"self": 0.000679301999866766,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013452600002210602,
"count": 8,
"is_parallel": true,
"self": 0.0013452600002210602
}
}
},
"UnityEnvironment.step": {
"total": 0.04842984100002923,
"count": 1,
"is_parallel": true,
"self": 0.0005369990000190228,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005413979999957519,
"count": 1,
"is_parallel": true,
"self": 0.0005413979999957519
},
"communicator.exchange": {
"total": 0.04559765800001969,
"count": 1,
"is_parallel": true,
"self": 0.04559765800001969
},
"steps_from_proto": {
"total": 0.0017537859999947614,
"count": 1,
"is_parallel": true,
"self": 0.0004373560001340593,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001316429999860702,
"count": 8,
"is_parallel": true,
"self": 0.001316429999860702
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 329.41328499000826,
"count": 18899,
"is_parallel": true,
"self": 9.387829438018002,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.202833267998699,
"count": 18899,
"is_parallel": true,
"self": 7.202833267998699
},
"communicator.exchange": {
"total": 283.79601939299323,
"count": 18899,
"is_parallel": true,
"self": 283.79601939299323
},
"steps_from_proto": {
"total": 29.026602890998333,
"count": 18899,
"is_parallel": true,
"self": 6.302943624954196,
"children": {
"_process_rank_one_or_two_observation": {
"total": 22.723659266044137,
"count": 151192,
"is_parallel": true,
"self": 22.723659266044137
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 187.52020321800592,
"count": 18900,
"self": 0.6296578120136473,
"children": {
"process_trajectory": {
"total": 35.372208889991384,
"count": 18900,
"self": 35.372208889991384
},
"_update_policy": {
"total": 151.5183365160009,
"count": 123,
"self": 96.57007124200902,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.948265273991865,
"count": 6831,
"self": 54.948265273991865
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0870001005969243e-06,
"count": 1,
"self": 1.0870001005969243e-06
},
"TrainerController._save_models": {
"total": 0.23533772199994019,
"count": 1,
"self": 0.001340895999874192,
"children": {
"RLTrainer._checkpoint": {
"total": 0.233996826000066,
"count": 1,
"self": 0.233996826000066
}
}
}
}
}
}
}