ppo-PyramidsRND/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3652462661266327,
"min": 0.35558298230171204,
"max": 1.4143867492675781,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11027.515625,
"min": 10693.759765625,
"max": 42906.8359375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989976.0,
"min": 29952.0,
"max": 989976.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989976.0,
"min": 29952.0,
"max": 989976.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.49422723054885864,
"min": -0.11669795215129852,
"max": 0.49422723054885864,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 131.46444702148438,
"min": -27.774112701416016,
"max": 131.46444702148438,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.018996428698301315,
"min": 0.007305871229618788,
"max": 0.5553948879241943,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.0530500411987305,
"min": 1.884914755821228,
"max": 131.6285858154297,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06665942495289658,
"min": 0.06563412556259245,
"max": 0.07392641945142982,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9332319493405521,
"min": 0.5174849361600087,
"max": 1.0447802424023394,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01383853923297942,
"min": 0.00010542718356777116,
"max": 0.014246269452303836,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19373954926171186,
"min": 0.0011596990192454827,
"max": 0.1994477723322537,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.0140457298804812e-05,
"min": 1.0140457298804812e-05,
"max": 0.0002951986437503818,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00014196640218326736,
"min": 0.00014196640218326736,
"max": 0.0032284872525504948,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10338012022630837,
"min": 0.10338012022630837,
"max": 0.19839954738330975,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.447321683168317,
"min": 1.3887968316831683,
"max": 2.321582277227723,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0003476740106082038,
"min": 0.0003476740106082038,
"max": 0.009840114783592644,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.004867436148514853,
"min": 0.004867436148514853,
"max": 0.10762862138613861,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014419575221836567,
"min": 0.014419575221836567,
"max": 0.5946853756904602,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2018740475177765,
"min": 0.2018740475177765,
"max": 4.162797451019287,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 402.3466666666667,
"min": 402.3466666666667,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30176.0,
"min": 15984.0,
"max": 33290.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4376106444001198,
"min": -1.0000000521540642,
"max": 1.4376106444001198,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 107.82079833000898,
"min": -31.998401671648026,
"max": 107.82079833000898,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4376106444001198,
"min": -1.0000000521540642,
"max": 1.4376106444001198,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 107.82079833000898,
"min": -31.998401671648026,
"max": 107.82079833000898,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05992915028550973,
"min": 0.05992915028550973,
"max": 12.075403152033687,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.494686271413229,
"min": 4.386309638386592,
"max": 193.206450432539,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673796949",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673799016"
},
"total": 2067.0185086229994,
"count": 1,
"self": 0.47671244299954196,
"children": {
"run_training.setup": {
"total": 0.11826165299999047,
"count": 1,
"self": 0.11826165299999047
},
"TrainerController.start_learning": {
"total": 2066.423534527,
"count": 1,
"self": 1.2653916589838445,
"children": {
"TrainerController._reset_env": {
"total": 6.7974483289999625,
"count": 1,
"self": 6.7974483289999625
},
"TrainerController.advance": {
"total": 2058.269175276017,
"count": 64034,
"self": 1.3027417352045632,
"children": {
"env_step": {
"total": 1359.7551954319529,
"count": 64034,
"self": 1250.82816550896,
"children": {
"SubprocessEnvManager._take_step": {
"total": 108.10615614007065,
"count": 64034,
"self": 4.466587902997617,
"children": {
"TorchPolicy.evaluate": {
"total": 103.63956823707304,
"count": 63178,
"self": 34.732174530935936,
"children": {
"TorchPolicy.sample_actions": {
"total": 68.9073937061371,
"count": 63178,
"self": 68.9073937061371
}
}
}
}
},
"workers": {
"total": 0.8208737829222628,
"count": 64034,
"self": 0.0,
"children": {
"worker_root": {
"total": 2061.9042876479475,
"count": 64034,
"is_parallel": true,
"self": 912.8860802919512,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017428930000278342,
"count": 1,
"is_parallel": true,
"self": 0.00061219900044307,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011306939995847642,
"count": 8,
"is_parallel": true,
"self": 0.0011306939995847642
}
}
},
"UnityEnvironment.step": {
"total": 0.04727561699974103,
"count": 1,
"is_parallel": true,
"self": 0.0005343569996512088,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005241029998614977,
"count": 1,
"is_parallel": true,
"self": 0.0005241029998614977
},
"communicator.exchange": {
"total": 0.044465932000093744,
"count": 1,
"is_parallel": true,
"self": 0.044465932000093744
},
"steps_from_proto": {
"total": 0.001751225000134582,
"count": 1,
"is_parallel": true,
"self": 0.00046358900090126554,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012876359992333164,
"count": 8,
"is_parallel": true,
"self": 0.0012876359992333164
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1149.0182073559963,
"count": 64033,
"is_parallel": true,
"self": 29.62406433598244,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.328380398967056,
"count": 64033,
"is_parallel": true,
"self": 24.328380398967056
},
"communicator.exchange": {
"total": 988.8844497450455,
"count": 64033,
"is_parallel": true,
"self": 988.8844497450455
},
"steps_from_proto": {
"total": 106.18131287600136,
"count": 64033,
"is_parallel": true,
"self": 23.21197597377659,
"children": {
"_process_rank_one_or_two_observation": {
"total": 82.96933690222477,
"count": 512264,
"is_parallel": true,
"self": 82.96933690222477
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 697.2112381088596,
"count": 64034,
"self": 2.2577392447446982,
"children": {
"process_trajectory": {
"total": 150.28269188411969,
"count": 64034,
"self": 150.0749842261198,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20770765799989022,
"count": 2,
"self": 0.20770765799989022
}
}
},
"_update_policy": {
"total": 544.6708069799952,
"count": 438,
"self": 210.1910749380395,
"children": {
"TorchPPOOptimizer.update": {
"total": 334.4797320419557,
"count": 23040,
"self": 334.4797320419557
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0739995559561066e-06,
"count": 1,
"self": 1.0739995559561066e-06
},
"TrainerController._save_models": {
"total": 0.09151818899954378,
"count": 1,
"self": 0.0013942409996161587,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09012394799992762,
"count": 1,
"self": 0.09012394799992762
}
}
}
}
}
}
}
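
Notes on reading this file: the JSON above has two main parts. "gauges" holds per-statistic summaries (last value, min, max, and count of reporting intervals) for the PPO + RND run, and the remaining top-level keys ("total", "count", "self", "children") form a hierarchical timer tree rooted at "root" that records where wall-clock time went. The following is a minimal sketch, assuming the file is saved locally as run_logs/timers.json (the path is an assumption), of how one might load it with the Python standard library and print both sections; it is an illustration, not part of the ML-Agents toolkit.

# Sketch: summarize an ML-Agents timers.json (path below is an assumption).
import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Gauges: training statistics with last value / min / max / count.
for name, gauge in timers["gauges"].items():
    print(f"{name}: last={gauge['value']:.4g} "
          f"min={gauge['min']:.4g} max={gauge['max']:.4g} (n={gauge['count']})")

# Timer tree: each node has total seconds, call count, self time, and
# nested children. Walk it recursively to show the time breakdown.
def walk(name, node, depth=0):
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.2f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1)

walk("root", timers)

Run against this file, the tree walk would show, for example, that of the ~2067 s total, most time sits under TrainerController.advance, split between env_step (environment stepping and communicator.exchange) and trainer_advance (_update_policy / TorchPPOOptimizer.update).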