ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.399404525756836,
"min": 1.3993847370147705,
"max": 1.4282042980194092,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69472.0390625,
"min": 69042.4921875,
"max": 77480.5,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 115.24883720930232,
"min": 108.91629955947137,
"max": 390.1796875,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49557.0,
"min": 49063.0,
"max": 50201.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999960.0,
"min": 49348.0,
"max": 1999960.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999960.0,
"min": 49348.0,
"max": 1999960.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3263306617736816,
"min": 0.06068781390786171,
"max": 2.3524115085601807,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1000.3221435546875,
"min": 7.707352161407471,
"max": 1026.75,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5133702425069586,
"min": 1.7918434236931988,
"max": 3.8198634717675994,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1510.7492042779922,
"min": 227.56411480903625,
"max": 1656.2133178710938,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5133702425069586,
"min": 1.7918434236931988,
"max": 3.8198634717675994,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1510.7492042779922,
"min": 227.56411480903625,
"max": 1656.2133178710938,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.022108360500230143,
"min": 0.012974449577288598,
"max": 0.022108360500230143,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.044216721000460285,
"min": 0.02694974663160489,
"max": 0.05873404970237364,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04657639600336552,
"min": 0.022803141083568335,
"max": 0.06670776593188446,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.09315279200673104,
"min": 0.04560628216713667,
"max": 0.18257888903220496,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.209398596900003e-06,
"min": 4.209398596900003e-06,
"max": 0.0002953602765465749,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.418797193800007e-06,
"min": 8.418797193800007e-06,
"max": 0.0008441305686231499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1014031,
"min": 0.1014031,
"max": 0.19845342500000007,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.2028062,
"min": 0.2028062,
"max": 0.5813768500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.001469000000007e-05,
"min": 8.001469000000007e-05,
"max": 0.0049228259075,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00016002938000000014,
"min": 0.00016002938000000014,
"max": 0.014070704815000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670792067",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670794255"
},
"total": 2187.3471971789995,
"count": 1,
"self": 0.3964854979994925,
"children": {
"run_training.setup": {
"total": 0.4299121259999765,
"count": 1,
"self": 0.4299121259999765
},
"TrainerController.start_learning": {
"total": 2186.520799555,
"count": 1,
"self": 3.6737442039634516,
"children": {
"TrainerController._reset_env": {
"total": 10.508478303000004,
"count": 1,
"self": 10.508478303000004
},
"TrainerController.advance": {
"total": 2172.212664476036,
"count": 230393,
"self": 4.096621761004826,
"children": {
"env_step": {
"total": 1711.7142273600512,
"count": 230393,
"self": 1434.5033309911562,
"children": {
"SubprocessEnvManager._take_step": {
"total": 274.7132061359767,
"count": 230393,
"self": 14.024938907065405,
"children": {
"TorchPolicy.evaluate": {
"total": 260.6882672289113,
"count": 222944,
"self": 64.55022383489421,
"children": {
"TorchPolicy.sample_actions": {
"total": 196.13804339401707,
"count": 222944,
"self": 196.13804339401707
}
}
}
}
},
"workers": {
"total": 2.4976902329183304,
"count": 230393,
"self": 0.0,
"children": {
"worker_root": {
"total": 2179.2498037379282,
"count": 230393,
"is_parallel": true,
"self": 998.127821664853,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021103959999777544,
"count": 1,
"is_parallel": true,
"self": 0.00029359100000192484,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018168049999758296,
"count": 2,
"is_parallel": true,
"self": 0.0018168049999758296
}
}
},
"UnityEnvironment.step": {
"total": 0.027209695999999894,
"count": 1,
"is_parallel": true,
"self": 0.0002683650000108173,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001746730000036223,
"count": 1,
"is_parallel": true,
"self": 0.0001746730000036223
},
"communicator.exchange": {
"total": 0.026069340999981705,
"count": 1,
"is_parallel": true,
"self": 0.026069340999981705
},
"steps_from_proto": {
"total": 0.0006973170000037499,
"count": 1,
"is_parallel": true,
"self": 0.0002398390000166728,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00045747799998707706,
"count": 2,
"is_parallel": true,
"self": 0.00045747799998707706
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1181.1219820730753,
"count": 230392,
"is_parallel": true,
"self": 34.36778842209674,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.05278143598036,
"count": 230392,
"is_parallel": true,
"self": 75.05278143598036
},
"communicator.exchange": {
"total": 980.7481968400073,
"count": 230392,
"is_parallel": true,
"self": 980.7481968400073
},
"steps_from_proto": {
"total": 90.9532153749908,
"count": 230392,
"is_parallel": true,
"self": 37.19664421291969,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.75657116207111,
"count": 460784,
"is_parallel": true,
"self": 53.75657116207111
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 456.40181535498016,
"count": 230393,
"self": 5.860637586947917,
"children": {
"process_trajectory": {
"total": 137.36401602203188,
"count": 230393,
"self": 136.88624085603186,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4777751660000149,
"count": 4,
"self": 0.4777751660000149
}
}
},
"_update_policy": {
"total": 313.17716174600037,
"count": 96,
"self": 260.18686717800034,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.990294568000024,
"count": 2880,
"self": 52.990294568000024
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.960003808373585e-07,
"count": 1,
"self": 9.960003808373585e-07
},
"TrainerController._save_models": {
"total": 0.12591157599990765,
"count": 1,
"self": 0.0021731349997935467,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12373844100011411,
"count": 1,
"self": 0.12373844100011411
}
}
}
}
}
}
}
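
A minimal sketch of how this log can be inspected, assuming the JSON above is saved locally as "run_logs/timers.json" (the path and key names are taken from the file itself; the script below is illustrative, not part of the ML-Agents toolchain):

import json

# Load the ML-Agents timer log shown above.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# "gauges" holds per-run summary statistics (final value plus min/max over the run).
reward = timers["gauges"]["Huggy.Environment.CumulativeReward.mean"]
print(f"Final mean cumulative reward: {reward['value']:.3f} "
      f"(min {reward['min']:.3f}, max {reward['max']:.3f})")

# The timer tree nests under "children"; walk one level of it to see
# how wall-clock time split between environment stepping and training.
start_learning = timers["children"]["TrainerController.start_learning"]
advance = start_learning["children"]["TrainerController.advance"]
for name, node in advance["children"].items():
    share = node["total"] / timers["total"] * 100
    print(f"{name}: {node['total']:.1f}s ({share:.1f}% of total)")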