{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4063745737075806,
"min": 1.4063745737075806,
"max": 1.4283442497253418,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70583.125,
"min": 68088.3203125,
"max": 77922.78125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 92.42830188679245,
"min": 85.62975778546713,
"max": 361.45323741007195,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48987.0,
"min": 48919.0,
"max": 50242.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999975.0,
"min": 49631.0,
"max": 1999975.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999975.0,
"min": 49631.0,
"max": 1999975.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.364187717437744,
"min": 0.11931373924016953,
"max": 2.463958740234375,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1253.01953125,
"min": 16.465295791625977,
"max": 1365.5419921875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5802218891539663,
"min": 1.7672343191461286,
"max": 3.9659530431321524,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1897.5176012516022,
"min": 243.87833604216576,
"max": 2156.4057800769806,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5802218891539663,
"min": 1.7672343191461286,
"max": 3.9659530431321524,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1897.5176012516022,
"min": 243.87833604216576,
"max": 2156.4057800769806,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014935311320409206,
"min": 0.012949352296648916,
"max": 0.019665569811250963,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04480593396122762,
"min": 0.025898704593297833,
"max": 0.05899670943375289,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05536888382501073,
"min": 0.021772867999970913,
"max": 0.0569935085458888,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16610665147503217,
"min": 0.043545735999941826,
"max": 0.1709805256376664,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.2685489105166667e-06,
"min": 3.2685489105166667e-06,
"max": 0.000295370476543175,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.80564673155e-06,
"min": 9.80564673155e-06,
"max": 0.0008443389185536999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1010894833333333,
"min": 0.1010894833333333,
"max": 0.19845682500000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3032684499999999,
"min": 0.20732779999999998,
"max": 0.5814463,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.436521833333334e-05,
"min": 6.436521833333334e-05,
"max": 0.004922995567500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019309565500000002,
"min": 0.00019309565500000002,
"max": 0.01407417037,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671235309",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671237666"
},
"total": 2357.13801927,
"count": 1,
"self": 0.3960645829997702,
"children": {
"run_training.setup": {
"total": 0.10805261999996674,
"count": 1,
"self": 0.10805261999996674
},
"TrainerController.start_learning": {
"total": 2356.6339020670002,
"count": 1,
"self": 4.026731225897038,
"children": {
"TrainerController._reset_env": {
"total": 8.670285944999932,
"count": 1,
"self": 8.670285944999932
},
"TrainerController.advance": {
"total": 2343.817243637103,
"count": 232229,
"self": 4.41872043017338,
"children": {
"env_step": {
"total": 1856.9766943389493,
"count": 232229,
"self": 1561.5113232908907,
"children": {
"SubprocessEnvManager._take_step": {
"total": 292.7262306460228,
"count": 232229,
"self": 14.988549118990363,
"children": {
"TorchPolicy.evaluate": {
"total": 277.73768152703246,
"count": 223031,
"self": 69.59991880304813,
"children": {
"TorchPolicy.sample_actions": {
"total": 208.13776272398434,
"count": 223031,
"self": 208.13776272398434
}
}
}
}
},
"workers": {
"total": 2.7391404020359005,
"count": 232229,
"self": 0.0,
"children": {
"worker_root": {
"total": 2348.469596485064,
"count": 232229,
"is_parallel": true,
"self": 1063.4148527820398,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002198398000018642,
"count": 1,
"is_parallel": true,
"self": 0.0003344879999076511,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001863910000110991,
"count": 2,
"is_parallel": true,
"self": 0.001863910000110991
}
}
},
"UnityEnvironment.step": {
"total": 0.02933819500003665,
"count": 1,
"is_parallel": true,
"self": 0.0002795780000042214,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001843400000325346,
"count": 1,
"is_parallel": true,
"self": 0.0001843400000325346
},
"communicator.exchange": {
"total": 0.028169780999974137,
"count": 1,
"is_parallel": true,
"self": 0.028169780999974137
},
"steps_from_proto": {
"total": 0.0007044960000257561,
"count": 1,
"is_parallel": true,
"self": 0.00023714000008112635,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00046735599994462973,
"count": 2,
"is_parallel": true,
"self": 0.00046735599994462973
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1285.054743703024,
"count": 232228,
"is_parallel": true,
"self": 36.18346893708235,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 84.43722834195069,
"count": 232228,
"is_parallel": true,
"self": 84.43722834195069
},
"communicator.exchange": {
"total": 1064.4543991749651,
"count": 232228,
"is_parallel": true,
"self": 1064.4543991749651
},
"steps_from_proto": {
"total": 99.9796472490259,
"count": 232228,
"is_parallel": true,
"self": 43.54019355314756,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.43945369587834,
"count": 464456,
"is_parallel": true,
"self": 56.43945369587834
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 482.42182886798,
"count": 232229,
"self": 6.251811295051311,
"children": {
"process_trajectory": {
"total": 158.84119919592763,
"count": 232229,
"self": 157.65370497092806,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1874942249995684,
"count": 10,
"self": 1.1874942249995684
}
}
},
"_update_policy": {
"total": 317.3288183770011,
"count": 97,
"self": 263.8422246350076,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.48659374199349,
"count": 2910,
"self": 53.48659374199349
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3140002010914031e-06,
"count": 1,
"self": 1.3140002010914031e-06
},
"TrainerController._save_models": {
"total": 0.11963994500001718,
"count": 1,
"self": 0.002499770000213175,
"children": {
"RLTrainer._checkpoint": {
"total": 0.117140174999804,
"count": 1,
"self": 0.117140174999804
}
}
}
}
}
}
}