{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4093513488769531,
"min": 1.4093513488769531,
"max": 1.4282540082931519,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 72009.3984375,
"min": 67575.8671875,
"max": 78886.7421875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 101.65922920892496,
"min": 79.52334943639292,
"max": 399.37301587301585,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 50118.0,
"min": 49167.0,
"max": 50321.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999944.0,
"min": 49853.0,
"max": 1999944.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999944.0,
"min": 49853.0,
"max": 1999944.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.397094964981079,
"min": 0.09536170214414597,
"max": 2.4822850227355957,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1181.767822265625,
"min": 11.920212745666504,
"max": 1516.360595703125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5715749916633777,
"min": 1.8983856554031373,
"max": 3.9881128963808083,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1760.7864708900452,
"min": 237.29820692539215,
"max": 2403.4085606336594,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5715749916633777,
"min": 1.8983856554031373,
"max": 3.9881128963808083,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1760.7864708900452,
"min": 237.29820692539215,
"max": 2403.4085606336594,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01873358878543109,
"min": 0.01372431875061011,
"max": 0.02072536153330778,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05620076635629327,
"min": 0.02744863750122022,
"max": 0.05620076635629327,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.047081976218356025,
"min": 0.021605065558105707,
"max": 0.05732574036551846,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14124592865506808,
"min": 0.043210131116211414,
"max": 0.1719772210965554,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.472498842533331e-06,
"min": 3.472498842533331e-06,
"max": 0.0002953533765488749,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0417496527599993e-05,
"min": 1.0417496527599993e-05,
"max": 0.0008441899686033498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10115746666666665,
"min": 0.10115746666666665,
"max": 0.198451125,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3034724,
"min": 0.20747170000000004,
"max": 0.58139665,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.775758666666663e-05,
"min": 6.775758666666663e-05,
"max": 0.0049227111375,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002032727599999999,
"min": 0.0002032727599999999,
"max": 0.014071692834999996,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671040755",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671042888"
},
"total": 2133.413352953,
"count": 1,
"self": 0.4409369489999335,
"children": {
"run_training.setup": {
"total": 0.12070605399992473,
"count": 1,
"self": 0.12070605399992473
},
"TrainerController.start_learning": {
"total": 2132.85170995,
"count": 1,
"self": 3.635976590910559,
"children": {
"TrainerController._reset_env": {
"total": 10.769661004999989,
"count": 1,
"self": 10.769661004999989
},
"TrainerController.advance": {
"total": 2118.336592364089,
"count": 232506,
"self": 3.8804235512839114,
"children": {
"env_step": {
"total": 1666.4462551639303,
"count": 232506,
"self": 1401.0906255871237,
"children": {
"SubprocessEnvManager._take_step": {
"total": 262.87434323282434,
"count": 232506,
"self": 13.598351520879078,
"children": {
"TorchPolicy.evaluate": {
"total": 249.27599171194527,
"count": 223120,
"self": 62.9392301698698,
"children": {
"TorchPolicy.sample_actions": {
"total": 186.33676154207546,
"count": 223120,
"self": 186.33676154207546
}
}
}
}
},
"workers": {
"total": 2.481286343982447,
"count": 232506,
"self": 0.0,
"children": {
"worker_root": {
"total": 2125.3971207050236,
"count": 232506,
"is_parallel": true,
"self": 975.5302900510478,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002033064999977796,
"count": 1,
"is_parallel": true,
"self": 0.00032659199996487587,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017064730000129202,
"count": 2,
"is_parallel": true,
"self": 0.0017064730000129202
}
}
},
"UnityEnvironment.step": {
"total": 0.029163917000005313,
"count": 1,
"is_parallel": true,
"self": 0.0004144340001630553,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018539499990311015,
"count": 1,
"is_parallel": true,
"self": 0.00018539499990311015
},
"communicator.exchange": {
"total": 0.027722712000013416,
"count": 1,
"is_parallel": true,
"self": 0.027722712000013416
},
"steps_from_proto": {
"total": 0.0008413759999257309,
"count": 1,
"is_parallel": true,
"self": 0.0002739880000035555,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005673879999221754,
"count": 2,
"is_parallel": true,
"self": 0.0005673879999221754
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1149.8668306539757,
"count": 232505,
"is_parallel": true,
"self": 33.92490005410082,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 73.37999477895096,
"count": 232505,
"is_parallel": true,
"self": 73.37999477895096
},
"communicator.exchange": {
"total": 952.2510296318769,
"count": 232505,
"is_parallel": true,
"self": 952.2510296318769
},
"steps_from_proto": {
"total": 90.31090618904716,
"count": 232505,
"is_parallel": true,
"self": 37.01610053707782,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.29480565196934,
"count": 465010,
"is_parallel": true,
"self": 53.29480565196934
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 448.00991364887466,
"count": 232506,
"self": 5.67279632788609,
"children": {
"process_trajectory": {
"total": 139.29653692898808,
"count": 232506,
"self": 138.84250031198746,
"children": {
"RLTrainer._checkpoint": {
"total": 0.45403661700061093,
"count": 4,
"self": 0.45403661700061093
}
}
},
"_update_policy": {
"total": 303.0405803920005,
"count": 97,
"self": 250.38054596599193,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.660034426008565,
"count": 2910,
"self": 52.660034426008565
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0750000001280569e-06,
"count": 1,
"self": 1.0750000001280569e-06
},
"TrainerController._save_models": {
"total": 0.10947891500018159,
"count": 1,
"self": 0.0020828379997510638,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10739607700043052,
"count": 1,
"self": 0.10739607700043052
}
}
}
}
}
}
}