ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4067000150680542,
"min": 1.4067000150680542,
"max": 1.4305006265640259,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71316.875,
"min": 68756.0859375,
"max": 78265.1484375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 76.54290171606864,
"min": 74.26807228915662,
"max": 398.3968253968254,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49064.0,
"min": 48860.0,
"max": 50198.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999903.0,
"min": 49995.0,
"max": 1999903.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999903.0,
"min": 49995.0,
"max": 1999903.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.445237636566162,
"min": 0.20709088444709778,
"max": 2.542361259460449,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1567.3973388671875,
"min": 25.88636016845703,
"max": 1597.2890625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.737272260527529,
"min": 1.846358497619629,
"max": 4.0402990085584625,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2395.591518998146,
"min": 230.7948122024536,
"max": 2535.406445145607,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.737272260527529,
"min": 1.846358497619629,
"max": 4.0402990085584625,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2395.591518998146,
"min": 230.7948122024536,
"max": 2535.406445145607,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01589843193004425,
"min": 0.013997174390290941,
"max": 0.020058720331871883,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.047695295790132756,
"min": 0.027994348780581883,
"max": 0.059000144954188725,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.054870307569702466,
"min": 0.019808334391564132,
"max": 0.05925399487217267,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1646109227091074,
"min": 0.039616668783128264,
"max": 0.17776198461651802,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6826987724666687e-06,
"min": 3.6826987724666687e-06,
"max": 0.000295377226540925,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1048096317400006e-05,
"min": 1.1048096317400006e-05,
"max": 0.0008441644686118497,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10122753333333334,
"min": 0.10122753333333334,
"max": 0.19845907500000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3036826,
"min": 0.20758254999999995,
"max": 0.5813881499999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.12539133333334e-05,
"min": 7.12539133333334e-05,
"max": 0.0049231078425000015,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021376174000000022,
"min": 0.00021376174000000022,
"max": 0.014071268684999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681276339",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681278612"
},
"total": 2272.825530388,
"count": 1,
"self": 0.4384839669996836,
"children": {
"run_training.setup": {
"total": 0.17717539099999158,
"count": 1,
"self": 0.17717539099999158
},
"TrainerController.start_learning": {
"total": 2272.20987103,
"count": 1,
"self": 4.161777133014311,
"children": {
"TrainerController._reset_env": {
"total": 5.602622894999996,
"count": 1,
"self": 5.602622894999996
},
"TrainerController.advance": {
"total": 2262.328523908986,
"count": 233185,
"self": 4.484992140000486,
"children": {
"env_step": {
"total": 1767.5656215190493,
"count": 233185,
"self": 1497.5362417011452,
"children": {
"SubprocessEnvManager._take_step": {
"total": 267.28265593500925,
"count": 233185,
"self": 16.340168932897427,
"children": {
"TorchPolicy.evaluate": {
"total": 250.94248700211182,
"count": 223044,
"self": 250.94248700211182
}
}
},
"workers": {
"total": 2.7467238828948837,
"count": 233185,
"self": 0.0,
"children": {
"worker_root": {
"total": 2264.349231570066,
"count": 233185,
"is_parallel": true,
"self": 1038.958317721152,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0012497580000001562,
"count": 1,
"is_parallel": true,
"self": 0.00032260300000075404,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009271549999994022,
"count": 2,
"is_parallel": true,
"self": 0.0009271549999994022
}
}
},
"UnityEnvironment.step": {
"total": 0.05567641800001866,
"count": 1,
"is_parallel": true,
"self": 0.0003292390000524392,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022129199999199045,
"count": 1,
"is_parallel": true,
"self": 0.00022129199999199045
},
"communicator.exchange": {
"total": 0.05442278799998235,
"count": 1,
"is_parallel": true,
"self": 0.05442278799998235
},
"steps_from_proto": {
"total": 0.0007030989999918802,
"count": 1,
"is_parallel": true,
"self": 0.0002209179999965727,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004821809999953075,
"count": 2,
"is_parallel": true,
"self": 0.0004821809999953075
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1225.390913848914,
"count": 233184,
"is_parallel": true,
"self": 37.79920217878566,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.96090620705453,
"count": 233184,
"is_parallel": true,
"self": 77.96090620705453
},
"communicator.exchange": {
"total": 1022.2374248360273,
"count": 233184,
"is_parallel": true,
"self": 1022.2374248360273
},
"steps_from_proto": {
"total": 87.39338062704627,
"count": 233184,
"is_parallel": true,
"self": 32.68999399811031,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.70338662893596,
"count": 466368,
"is_parallel": true,
"self": 54.70338662893596
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 490.2779102499362,
"count": 233185,
"self": 6.372580475932466,
"children": {
"process_trajectory": {
"total": 129.32261727800352,
"count": 233185,
"self": 128.13791588100378,
"children": {
"RLTrainer._checkpoint": {
"total": 1.184701396999742,
"count": 10,
"self": 1.184701396999742
}
}
},
"_update_policy": {
"total": 354.5827124960002,
"count": 97,
"self": 297.19110990599785,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.39160259000238,
"count": 2910,
"self": 57.39160259000238
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.54999904934084e-07,
"count": 1,
"self": 9.54999904934084e-07
},
"TrainerController._save_models": {
"total": 0.11694613800000297,
"count": 1,
"self": 0.0020203200001560617,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11492581799984691,
"count": 1,
"self": 0.11492581799984691
}
}
}
}
}
}
}