{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.7376081943511963,
"min": 0.7376081943511963,
"max": 1.4347764253616333,
"count": 16
},
"Pyramids.Policy.Entropy.sum": {
"value": 22092.83984375,
"min": 22092.83984375,
"max": 43525.37890625,
"count": 16
},
"Pyramids.Step.mean": {
"value": 479905.0,
"min": 29952.0,
"max": 479905.0,
"count": 16
},
"Pyramids.Step.sum": {
"value": 479905.0,
"min": 29952.0,
"max": 479905.0,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.022917557507753372,
"min": -0.12516728043556213,
"max": 0.14653901755809784,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -5.591884136199951,
"min": -30.04014778137207,
"max": 34.7297477722168,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.011977709829807281,
"min": 0.00611144071444869,
"max": 0.24402427673339844,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.9225611686706543,
"min": 1.478968620300293,
"max": 58.565826416015625,
"count": 16
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07165605075548,
"min": 0.06319745854259871,
"max": 0.0729989165579078,
"count": 16
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.00318471057672,
"min": 0.48017553809090874,
"max": 1.0150389934676443,
"count": 16
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.003922808469266681,
"min": 0.00017044378441988213,
"max": 0.0059971272449223165,
"count": 16
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.05491931856973353,
"min": 0.001193106490939175,
"max": 0.07535271729177431,
"count": 16
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.156786423931428e-05,
"min": 2.156786423931428e-05,
"max": 0.00029030126037577137,
"count": 16
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0003019500993503999,
"min": 0.0003019500993503999,
"max": 0.0021562625812460005,
"count": 16
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10718925714285714,
"min": 0.10718925714285714,
"max": 0.19676708571428575,
"count": 16
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5006496,
"min": 1.1701888,
"max": 2.018754,
"count": 16
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0007282067885714287,
"min": 0.0007282067885714287,
"max": 0.00967703186285714,
"count": 16
},
"Pyramids.Policy.Beta.sum": {
"value": 0.01019489504,
"min": 0.01019489504,
"max": 0.07193352459999999,
"count": 16
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.020340751856565475,
"min": 0.020340751856565475,
"max": 0.35770383477211,
"count": 16
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.28477051854133606,
"min": 0.28477051854133606,
"max": 2.5039267539978027,
"count": 16
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 912.6176470588235,
"min": 865.3055555555555,
"max": 999.0,
"count": 16
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31029.0,
"min": 15984.0,
"max": 31968.0,
"count": 16
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.3836706357405466,
"min": -1.0000000521540642,
"max": -0.19908892839319176,
"count": 16
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -13.044801615178585,
"min": -32.000001668930054,
"max": -7.167201422154903,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.3836706357405466,
"min": -1.0000000521540642,
"max": -0.19908892839319176,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -13.044801615178585,
"min": -32.000001668930054,
"max": -7.167201422154903,
"count": 16
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.19226156707223066,
"min": 0.19226156707223066,
"max": 6.720401773229241,
"count": 16
},
"Pyramids.Policy.RndReward.sum": {
"value": 6.536893280455843,
"min": 6.536893280455843,
"max": 107.52642837166786,
"count": 16
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1660942166",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1660943098"
},
"total": 932.405891352,
"count": 1,
"self": 0.47755846099994415,
"children": {
"run_training.setup": {
"total": 0.04169316499996967,
"count": 1,
"self": 0.04169316499996967
},
"TrainerController.start_learning": {
"total": 931.8866397260001,
"count": 1,
"self": 0.6573253499847169,
"children": {
"TrainerController._reset_env": {
"total": 10.363247745999956,
"count": 1,
"self": 10.363247745999956
},
"TrainerController.advance": {
"total": 920.7708057630155,
"count": 31505,
"self": 0.6989882300242698,
"children": {
"env_step": {
"total": 562.9681373970069,
"count": 31505,
"self": 510.5003465470128,
"children": {
"SubprocessEnvManager._take_step": {
"total": 52.11612102399499,
"count": 31505,
"self": 2.2772967379970623,
"children": {
"TorchPolicy.evaluate": {
"total": 49.83882428599793,
"count": 31324,
"self": 16.967424204008353,
"children": {
"TorchPolicy.sample_actions": {
"total": 32.87140008198958,
"count": 31324,
"self": 32.87140008198958
}
}
}
}
},
"workers": {
"total": 0.35166982599912444,
"count": 31505,
"self": 0.0,
"children": {
"worker_root": {
"total": 929.8279945290066,
"count": 31505,
"is_parallel": true,
"self": 469.2585843840086,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00523813400002382,
"count": 1,
"is_parallel": true,
"self": 0.004049410999982683,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011887230000411364,
"count": 8,
"is_parallel": true,
"self": 0.0011887230000411364
}
}
},
"UnityEnvironment.step": {
"total": 0.047461717999908615,
"count": 1,
"is_parallel": true,
"self": 0.0004995249998955842,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000454087999969488,
"count": 1,
"is_parallel": true,
"self": 0.000454087999969488
},
"communicator.exchange": {
"total": 0.04491655900005753,
"count": 1,
"is_parallel": true,
"self": 0.04491655900005753
},
"steps_from_proto": {
"total": 0.0015915459999860104,
"count": 1,
"is_parallel": true,
"self": 0.0004124369997953181,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011791090001906923,
"count": 8,
"is_parallel": true,
"self": 0.0011791090001906923
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 460.56941014499796,
"count": 31504,
"is_parallel": true,
"self": 13.453589531009015,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 11.295920657001034,
"count": 31504,
"is_parallel": true,
"self": 11.295920657001034
},
"communicator.exchange": {
"total": 390.6960824149977,
"count": 31504,
"is_parallel": true,
"self": 390.6960824149977
},
"steps_from_proto": {
"total": 45.123817541990206,
"count": 31504,
"is_parallel": true,
"self": 11.082685498004594,
"children": {
"_process_rank_one_or_two_observation": {
"total": 34.04113204398561,
"count": 252032,
"is_parallel": true,
"self": 34.04113204398561
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 357.1036801359843,
"count": 31505,
"self": 1.0444641319820676,
"children": {
"process_trajectory": {
"total": 78.15251227400177,
"count": 31505,
"self": 78.0441957830019,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10831649099986862,
"count": 1,
"self": 0.10831649099986862
}
}
},
"_update_policy": {
"total": 277.90670373000046,
"count": 186,
"self": 112.0213123279982,
"children": {
"TorchPPOOptimizer.update": {
"total": 165.88539140200226,
"count": 11496,
"self": 165.88539140200226
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0080000265588751e-06,
"count": 1,
"self": 1.0080000265588751e-06
},
"TrainerController._save_models": {
"total": 0.09525985899995248,
"count": 1,
"self": 0.0016372319998936291,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09362262700005886,
"count": 1,
"self": 0.09362262700005886
}
}
}
}
}
}
}