ppo-PyramidsRND / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.11211054027080536,
"min": 0.10437927395105362,
"max": 1.577974796295166,
"count": 200
},
"Pyramids.Policy.Entropy.sum": {
"value": 1110.3427734375,
"min": 1021.8394775390625,
"max": 16158.4619140625,
"count": 200
},
"Pyramids.Step.mean": {
"value": 1999875.0,
"min": 9984.0,
"max": 1999875.0,
"count": 200
},
"Pyramids.Step.sum": {
"value": 1999875.0,
"min": 9984.0,
"max": 1999875.0,
"count": 200
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.05197207257151604,
"min": -0.10697302222251892,
"max": -0.009336870163679123,
"count": 200
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -4.209737777709961,
"min": -8.664814949035645,
"max": -0.7469496130943298,
"count": 200
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 1.4934475421905518,
"min": 0.45394232869148254,
"max": 1.5945155620574951,
"count": 200
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 120.96925354003906,
"min": 35.407501220703125,
"max": 129.15576171875,
"count": 200
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07237765926014011,
"min": 0.06081282791565172,
"max": 0.07969601967865053,
"count": 200
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.3618882963007006,
"min": 0.14916623805765994,
"max": 0.39848009839325266,
"count": 200
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.00882357942367283,
"min": 0.001527048253360679,
"max": 0.09856268196385462,
"count": 200
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.04411789711836415,
"min": 0.0071149989856054,
"max": 0.19712536392770924,
"count": 200
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.498297500900062e-07,
"min": 7.498297500900062e-07,
"max": 0.00029907840030719997,
"count": 200
},
"Pyramids.Policy.LearningRate.sum": {
"value": 3.749148750450031e-06,
"min": 3.749148750450031e-06,
"max": 0.00140631168122945,
"count": 200
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10024991000000003,
"min": 0.10024991000000003,
"max": 0.1996928,
"count": 200
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.5012495500000002,
"min": 0.3985856000000001,
"max": 0.96877055,
"count": 200
},
"Pyramids.Policy.Beta.mean": {
"value": 3.496600900000021e-05,
"min": 3.496600900000021e-05,
"max": 0.00996931072,
"count": 200
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00017483004500000104,
"min": 0.00017483004500000104,
"max": 0.046880177945000004,
"count": 200
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.7244455218315125,
"min": 0.6063039898872375,
"max": 0.9765970706939697,
"count": 200
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 3.622227668762207,
"min": 1.4746968746185303,
"max": 4.058317184448242,
"count": 200
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 916.0,
"min": 667.0,
"max": 999.0,
"count": 197
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 11908.0,
"min": 667.0,
"max": 15984.0,
"count": 197
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.4549538968847348,
"min": -1.0000000521540642,
"max": 1.3330000191926956,
"count": 197
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -5.914400659501553,
"min": -16.000000834465027,
"max": 1.4721996784210205,
"count": 197
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.4549538968847348,
"min": -1.0000000521540642,
"max": 1.3330000191926956,
"count": 197
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -5.914400659501553,
"min": -16.000000834465027,
"max": 1.4721996784210205,
"count": 197
},
"Pyramids.Policy.RndReward.mean": {
"value": 12.666740596294403,
"min": 9.22929909825325,
"max": 25.736984878778458,
"count": 197
},
"Pyramids.Policy.RndReward.sum": {
"value": 164.66762775182724,
"min": 9.22929909825325,
"max": 411.7917580604553,
"count": 197
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688614286",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688618735"
},
"total": 4449.416645535,
"count": 1,
"self": 0.5417612119990736,
"children": {
"run_training.setup": {
"total": 0.04669272599994656,
"count": 1,
"self": 0.04669272599994656
},
"TrainerController.start_learning": {
"total": 4448.828191597,
"count": 1,
"self": 3.594500760970732,
"children": {
"TrainerController._reset_env": {
"total": 4.704707150000104,
"count": 1,
"self": 4.704707150000104
},
"TrainerController.advance": {
"total": 4440.459955096028,
"count": 126654,
"self": 3.7311828959327613,
"children": {
"env_step": {
"total": 3008.8068601269124,
"count": 126654,
"self": 2733.304379934815,
"children": {
"SubprocessEnvManager._take_step": {
"total": 273.2025281229546,
"count": 126654,
"self": 11.415960402927567,
"children": {
"TorchPolicy.evaluate": {
"total": 261.78656772002705,
"count": 125051,
"self": 261.78656772002705
}
}
},
"workers": {
"total": 2.299952069142364,
"count": 126654,
"self": 0.0,
"children": {
"worker_root": {
"total": 4436.794861562939,
"count": 126654,
"is_parallel": true,
"self": 1982.681795018997,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00726297500000328,
"count": 1,
"is_parallel": true,
"self": 0.0057260560001850536,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015369189998182264,
"count": 8,
"is_parallel": true,
"self": 0.0015369189998182264
}
}
},
"UnityEnvironment.step": {
"total": 0.05525759399984054,
"count": 1,
"is_parallel": true,
"self": 0.0006821769998168747,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005257160000837757,
"count": 1,
"is_parallel": true,
"self": 0.0005257160000837757
},
"communicator.exchange": {
"total": 0.04979021700000885,
"count": 1,
"is_parallel": true,
"self": 0.04979021700000885
},
"steps_from_proto": {
"total": 0.004259483999931035,
"count": 1,
"is_parallel": true,
"self": 0.00045026899965705525,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00380921500027398,
"count": 8,
"is_parallel": true,
"self": 0.00380921500027398
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2454.1130665439423,
"count": 126653,
"is_parallel": true,
"self": 79.46362502206784,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 51.10031400495973,
"count": 126653,
"is_parallel": true,
"self": 51.10031400495973
},
"communicator.exchange": {
"total": 2086.1441328030096,
"count": 126653,
"is_parallel": true,
"self": 2086.1441328030096
},
"steps_from_proto": {
"total": 237.40499471390513,
"count": 126653,
"is_parallel": true,
"self": 47.60021024569414,
"children": {
"_process_rank_one_or_two_observation": {
"total": 189.80478446821098,
"count": 1013224,
"is_parallel": true,
"self": 189.80478446821098
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1427.9219120731832,
"count": 126654,
"self": 7.376676462175283,
"children": {
"process_trajectory": {
"total": 244.58542896601148,
"count": 126654,
"self": 244.13669510001046,
"children": {
"RLTrainer._checkpoint": {
"total": 0.44873386600102094,
"count": 4,
"self": 0.44873386600102094
}
}
},
"_update_policy": {
"total": 1175.9598066449964,
"count": 921,
"self": 761.7648807160115,
"children": {
"TorchPPOOptimizer.update": {
"total": 414.19492592898496,
"count": 45459,
"self": 414.19492592898496
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.680006769485772e-07,
"count": 1,
"self": 9.680006769485772e-07
},
"TrainerController._save_models": {
"total": 0.06902762200024881,
"count": 1,
"self": 0.0005604360003417241,
"children": {
"RLTrainer._checkpoint": {
"total": 0.06846718599990709,
"count": 1,
"self": 0.06846718599990709
}
}
}
}
}
}
}