{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.406399130821228,
"min": 1.4063981771469116,
"max": 1.4304447174072266,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69055.6015625,
"min": 68958.5078125,
"max": 76321.8671875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 92.42429906542056,
"min": 77.13458528951487,
"max": 366.3382352941176,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49447.0,
"min": 49076.0,
"max": 50220.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999944.0,
"min": 49836.0,
"max": 1999944.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999944.0,
"min": 49836.0,
"max": 1999944.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.407731294631958,
"min": 0.07655195891857147,
"max": 2.4783289432525635,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1288.13623046875,
"min": 11.1000337600708,
"max": 1553.9453125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.779746847174992,
"min": 1.8453407063566405,
"max": 4.064257586774556,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2022.1645632386208,
"min": 267.5744024217129,
"max": 2496.527106165886,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.779746847174992,
"min": 1.8453407063566405,
"max": 4.064257586774556,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2022.1645632386208,
"min": 267.5744024217129,
"max": 2496.527106165886,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015024761387353971,
"min": 0.013726897861003333,
"max": 0.02169678861812119,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04507428416206191,
"min": 0.027453795722006666,
"max": 0.06509036585436358,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05226574966477023,
"min": 0.02326111951842904,
"max": 0.06229026907434067,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1567972489943107,
"min": 0.04652223903685808,
"max": 0.1736502488454183,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.835498721533333e-06,
"min": 3.835498721533333e-06,
"max": 0.0002953425015525,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.15064961646e-05,
"min": 1.15064961646e-05,
"max": 0.0008443024685658498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10127846666666668,
"min": 0.10127846666666668,
"max": 0.19844750000000008,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30383540000000003,
"min": 0.20768275000000003,
"max": 0.5814341500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.379548666666668e-05,
"min": 7.379548666666668e-05,
"max": 0.0049225302499999995,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022138646000000002,
"min": 0.00022138646000000002,
"max": 0.014073564085000004,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1726762658",
"python_version": "3.10.12 (main, Mar 22 2024, 16:50:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1726767661"
},
"total": 5003.287052381001,
"count": 1,
"self": 1.0090026010002475,
"children": {
"run_training.setup": {
"total": 0.11413920100005726,
"count": 1,
"self": 0.11413920100005726
},
"TrainerController.start_learning": {
"total": 5002.163910579,
"count": 1,
"self": 8.729700881978715,
"children": {
"TrainerController._reset_env": {
"total": 4.35992919399996,
"count": 1,
"self": 4.35992919399996
},
"TrainerController.advance": {
"total": 4988.936352369021,
"count": 232633,
"self": 8.784878164658949,
"children": {
"env_step": {
"total": 3172.0158442031293,
"count": 232633,
"self": 2647.0147949511293,
"children": {
"SubprocessEnvManager._take_step": {
"total": 519.3098999540337,
"count": 232633,
"self": 33.99731664505873,
"children": {
"TorchPolicy.evaluate": {
"total": 485.3125833089749,
"count": 222906,
"self": 485.3125833089749
}
}
},
"workers": {
"total": 5.691149297966604,
"count": 232633,
"self": 0.0,
"children": {
"worker_root": {
"total": 4987.656985771987,
"count": 232633,
"is_parallel": true,
"self": 2881.353652962102,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0013159569999743326,
"count": 1,
"is_parallel": true,
"self": 0.0003472460000466526,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00096871099992768,
"count": 2,
"is_parallel": true,
"self": 0.00096871099992768
}
}
},
"UnityEnvironment.step": {
"total": 0.04341522499998973,
"count": 1,
"is_parallel": true,
"self": 0.0005253919999859136,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023049599997193582,
"count": 1,
"is_parallel": true,
"self": 0.00023049599997193582
},
"communicator.exchange": {
"total": 0.04156923100003951,
"count": 1,
"is_parallel": true,
"self": 0.04156923100003951
},
"steps_from_proto": {
"total": 0.0010901059999923746,
"count": 1,
"is_parallel": true,
"self": 0.0002834509999729562,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008066550000194184,
"count": 2,
"is_parallel": true,
"self": 0.0008066550000194184
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2106.303332809885,
"count": 232632,
"is_parallel": true,
"self": 66.3073202852654,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 116.14553314201908,
"count": 232632,
"is_parallel": true,
"self": 116.14553314201908
},
"communicator.exchange": {
"total": 1780.3862751816955,
"count": 232632,
"is_parallel": true,
"self": 1780.3862751816955
},
"steps_from_proto": {
"total": 143.464204200905,
"count": 232632,
"is_parallel": true,
"self": 46.75187388854306,
"children": {
"_process_rank_one_or_two_observation": {
"total": 96.71233031236193,
"count": 465264,
"is_parallel": true,
"self": 96.71233031236193
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1808.1356300012321,
"count": 232633,
"self": 13.438877998338285,
"children": {
"process_trajectory": {
"total": 270.161843717896,
"count": 232633,
"self": 268.658173279896,
"children": {
"RLTrainer._checkpoint": {
"total": 1.503670437999972,
"count": 10,
"self": 1.503670437999972
}
}
},
"_update_policy": {
"total": 1524.534908284998,
"count": 97,
"self": 360.56407436797485,
"children": {
"TorchPPOOptimizer.update": {
"total": 1163.9708339170231,
"count": 2910,
"self": 1163.9708339170231
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1350002750987187e-06,
"count": 1,
"self": 1.1350002750987187e-06
},
"TrainerController._save_models": {
"total": 0.13792699900022853,
"count": 1,
"self": 0.0045016450003458885,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13342535399988265,
"count": 1,
"self": 0.13342535399988265
}
}
}
}
}
}
}