ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4048986434936523,
"min": 1.4048986434936523,
"max": 1.4263681173324585,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70736.6484375,
"min": 67301.8125,
"max": 77108.9296875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 76.37306501547988,
"min": 76.37306501547988,
"max": 408.2439024390244,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49337.0,
"min": 48840.0,
"max": 50214.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999961.0,
"min": 49687.0,
"max": 1999961.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999961.0,
"min": 49687.0,
"max": 1999961.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4341607093811035,
"min": 0.038086436688899994,
"max": 2.4809670448303223,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1572.4677734375,
"min": 4.64654541015625,
"max": 1572.4677734375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8134656087163803,
"min": 1.853650940734832,
"max": 4.089835539485741,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2463.4987832307816,
"min": 226.1454147696495,
"max": 2504.751701235771,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8134656087163803,
"min": 1.853650940734832,
"max": 4.089835539485741,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2463.4987832307816,
"min": 226.1454147696495,
"max": 2504.751701235771,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01577245648594625,
"min": 0.013496501123230094,
"max": 0.020897256287025812,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.047317369457838744,
"min": 0.02747815896679337,
"max": 0.05891120951952568,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05710878661937183,
"min": 0.02352624184762438,
"max": 0.0610085478052497,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1713263598581155,
"min": 0.04705248369524876,
"max": 0.1799698247263829,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6590987803333346e-06,
"min": 3.6590987803333346e-06,
"max": 0.00029530890156370004,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0977296341000004e-05,
"min": 1.0977296341000004e-05,
"max": 0.0008441379186207001,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10121966666666667,
"min": 0.10121966666666667,
"max": 0.1984363,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.303659,
"min": 0.20756750000000002,
"max": 0.5813793,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.08613666666667e-05,
"min": 7.08613666666667e-05,
"max": 0.00492197137,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002125841000000001,
"min": 0.0002125841000000001,
"max": 0.01407082707,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1724349709",
"python_version": "3.10.12 | packaged by conda-forge | (main, Jun 23 2023, 22:40:32) [GCC 12.3.0]",
"command_line_arguments": "/home/ai24/miniconda3/envs/hfrl1b/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1724351011"
},
"total": 1302.4781580160002,
"count": 1,
"self": 0.1667126670001835,
"children": {
"run_training.setup": {
"total": 0.010047051000128704,
"count": 1,
"self": 0.010047051000128704
},
"TrainerController.start_learning": {
"total": 1302.3013982979999,
"count": 1,
"self": 1.761260258065704,
"children": {
"TrainerController._reset_env": {
"total": 0.9199118510000517,
"count": 1,
"self": 0.9199118510000517
},
"TrainerController.advance": {
"total": 1299.547408870934,
"count": 232607,
"self": 1.6438259888150242,
"children": {
"env_step": {
"total": 1060.049414705049,
"count": 232607,
"self": 662.808449208215,
"children": {
"SubprocessEnvManager._take_step": {
"total": 396.0441095708852,
"count": 232607,
"self": 6.7949427798557736,
"children": {
"TorchPolicy.evaluate": {
"total": 389.24916679102944,
"count": 222933,
"self": 389.24916679102944
}
}
},
"workers": {
"total": 1.1968559259487392,
"count": 232607,
"self": 0.0,
"children": {
"worker_root": {
"total": 1297.8832578049573,
"count": 232607,
"is_parallel": true,
"self": 738.0879688879786,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00043581299996731104,
"count": 1,
"is_parallel": true,
"self": 0.00011636699991868227,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0003194460000486288,
"count": 2,
"is_parallel": true,
"self": 0.0003194460000486288
}
}
},
"UnityEnvironment.step": {
"total": 0.00965422099989155,
"count": 1,
"is_parallel": true,
"self": 9.616599982109619e-05,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.831499988242285e-05,
"count": 1,
"is_parallel": true,
"self": 7.831499988242285e-05
},
"communicator.exchange": {
"total": 0.009264612000151828,
"count": 1,
"is_parallel": true,
"self": 0.009264612000151828
},
"steps_from_proto": {
"total": 0.00021512800003620214,
"count": 1,
"is_parallel": true,
"self": 6.60990001506434e-05,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00014902899988555873,
"count": 2,
"is_parallel": true,
"self": 0.00014902899988555873
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 559.7952889169787,
"count": 232606,
"is_parallel": true,
"self": 10.45726713793465,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 18.33294875599904,
"count": 232606,
"is_parallel": true,
"self": 18.33294875599904
},
"communicator.exchange": {
"total": 508.587666733991,
"count": 232606,
"is_parallel": true,
"self": 508.587666733991
},
"steps_from_proto": {
"total": 22.417406289054043,
"count": 232606,
"is_parallel": true,
"self": 7.566846273080273,
"children": {
"_process_rank_one_or_two_observation": {
"total": 14.85056001597377,
"count": 465212,
"is_parallel": true,
"self": 14.85056001597377
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 237.85416817707005,
"count": 232607,
"self": 2.712399865041334,
"children": {
"process_trajectory": {
"total": 95.724944079029,
"count": 232607,
"self": 94.98872758002858,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7362164990004203,
"count": 10,
"self": 0.7362164990004203
}
}
},
"_update_policy": {
"total": 139.41682423299972,
"count": 97,
"self": 85.68272680899304,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.73409742400668,
"count": 2910,
"self": 53.73409742400668
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.5400020098895766e-07,
"count": 1,
"self": 4.5400020098895766e-07
},
"TrainerController._save_models": {
"total": 0.072816863999833,
"count": 1,
"self": 0.0006684929999209999,
"children": {
"RLTrainer._checkpoint": {
"total": 0.072148370999912,
"count": 1,
"self": 0.072148370999912
}
}
}
}
}
}
}
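
For reference, a minimal sketch (not part of the original run log) of one way to inspect this file offline with the Python standard library: it prints each gauge's value/min/max and the largest wall-clock totals found in the timer tree. The local file name `timers.json` is an assumption; point it at your own copy of this log.

```python
import json

# Assumed local copy of the timers.json shown above.
with open("timers.json") as f:
    root = json.load(f)

# Gauges: per-metric value/min/max summaries recorded over the run.
for name, gauge in root["gauges"].items():
    print(f"{name}: value={gauge['value']:g} min={gauge['min']:g} max={gauge['max']:g}")

# Timer tree: recursively walk the nested "children" blocks and collect
# each block's cumulative wall-clock total in seconds.
def walk(node, path=""):
    for child_name, child in node.get("children", {}).items():
        child_path = f"{path}/{child_name}"
        yield child_path, child.get("total", 0.0)
        yield from walk(child, child_path)

print("\nLargest wall-clock totals (seconds):")
for path, total in sorted(walk(root), key=lambda item: -item[1])[:10]:
    print(f"{total:10.2f}  {path}")
```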