{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4007923603057861,
"min": 1.4007923603057861,
"max": 1.4268736839294434,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68948.3984375,
"min": 68355.7265625,
"max": 76920.6015625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 80.39087947882736,
"min": 78.16640253565768,
"max": 391.515625,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49360.0,
"min": 48844.0,
"max": 50114.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999958.0,
"min": 49845.0,
"max": 1999958.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999958.0,
"min": 49845.0,
"max": 1999958.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.394939661026001,
"min": 0.00452062813565135,
"max": 2.4571375846862793,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1470.492919921875,
"min": 0.5741197466850281,
"max": 1532.2919921875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6738363549064736,
"min": 1.8986369155523346,
"max": 3.9346096283350236,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2255.7355219125748,
"min": 241.12688827514648,
"max": 2390.5631998181343,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6738363549064736,
"min": 1.8986369155523346,
"max": 3.9346096283350236,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2255.7355219125748,
"min": 241.12688827514648,
"max": 2390.5631998181343,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014083502792426669,
"min": 0.014083502792426669,
"max": 0.020321885131609936,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.042250508377280005,
"min": 0.029693088269171615,
"max": 0.055273245718368946,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.059423663839697834,
"min": 0.022659232933074237,
"max": 0.06161989147464435,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1782709915190935,
"min": 0.045318465866148475,
"max": 0.1782709915190935,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4801488399833343e-06,
"min": 3.4801488399833343e-06,
"max": 0.00029524035158654997,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0440446519950002e-05,
"min": 1.0440446519950002e-05,
"max": 0.0008436064687978498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10116001666666669,
"min": 0.10116001666666669,
"max": 0.1984134500000001,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30348005000000006,
"min": 0.2074488,
"max": 0.58120215,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.788483166666669e-05,
"min": 6.788483166666669e-05,
"max": 0.0049208311549999995,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020365449500000005,
"min": 0.00020365449500000005,
"max": 0.014061987285,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671073546",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671075878"
},
"total": 2332.1954697,
"count": 1,
"self": 0.3900981619999584,
"children": {
"run_training.setup": {
"total": 0.10867736499994862,
"count": 1,
"self": 0.10867736499994862
},
"TrainerController.start_learning": {
"total": 2331.696694173,
"count": 1,
"self": 4.025367542953973,
"children": {
"TrainerController._reset_env": {
"total": 9.200852647000033,
"count": 1,
"self": 9.200852647000033
},
"TrainerController.advance": {
"total": 2318.350162554046,
"count": 232707,
"self": 4.267579872028364,
"children": {
"env_step": {
"total": 1835.343304222988,
"count": 232707,
"self": 1539.1861108720154,
"children": {
"SubprocessEnvManager._take_step": {
"total": 293.4514078939701,
"count": 232707,
"self": 15.044724277893579,
"children": {
"TorchPolicy.evaluate": {
"total": 278.4066836160765,
"count": 222901,
"self": 68.47432931601952,
"children": {
"TorchPolicy.sample_actions": {
"total": 209.932354300057,
"count": 222901,
"self": 209.932354300057
}
}
}
}
},
"workers": {
"total": 2.7057854570024347,
"count": 232707,
"self": 0.0,
"children": {
"worker_root": {
"total": 2323.717643377024,
"count": 232707,
"is_parallel": true,
"self": 1058.689054605071,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018297139999958745,
"count": 1,
"is_parallel": true,
"self": 0.00032910800007357466,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015006059999222998,
"count": 2,
"is_parallel": true,
"self": 0.0015006059999222998
}
}
},
"UnityEnvironment.step": {
"total": 0.0286146750000853,
"count": 1,
"is_parallel": true,
"self": 0.0002581600000439721,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002035869999872375,
"count": 1,
"is_parallel": true,
"self": 0.0002035869999872375
},
"communicator.exchange": {
"total": 0.02734713200004535,
"count": 1,
"is_parallel": true,
"self": 0.02734713200004535
},
"steps_from_proto": {
"total": 0.0008057960000087405,
"count": 1,
"is_parallel": true,
"self": 0.00030924500003948197,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004965509999692586,
"count": 2,
"is_parallel": true,
"self": 0.0004965509999692586
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1265.028588771953,
"count": 232706,
"is_parallel": true,
"self": 35.45462302300166,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.87760505698361,
"count": 232706,
"is_parallel": true,
"self": 82.87760505698361
},
"communicator.exchange": {
"total": 1049.3597882579436,
"count": 232706,
"is_parallel": true,
"self": 1049.3597882579436
},
"steps_from_proto": {
"total": 97.33657243402422,
"count": 232706,
"is_parallel": true,
"self": 42.5675959019469,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.76897653207732,
"count": 465412,
"is_parallel": true,
"self": 54.76897653207732
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 478.7392784590296,
"count": 232707,
"self": 6.085267702987153,
"children": {
"process_trajectory": {
"total": 158.4219921390427,
"count": 232707,
"self": 157.93367619804235,
"children": {
"RLTrainer._checkpoint": {
"total": 0.48831594100033726,
"count": 4,
"self": 0.48831594100033726
}
}
},
"_update_policy": {
"total": 314.23201861699977,
"count": 97,
"self": 260.9788152019844,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.25320341501538,
"count": 2910,
"self": 53.25320341501538
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.4600000011269e-07,
"count": 1,
"self": 9.4600000011269e-07
},
"TrainerController._save_models": {
"total": 0.12031048300013936,
"count": 1,
"self": 0.0019651229999908537,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1183453600001485,
"count": 1,
"self": 0.1183453600001485
}
}
}
}
}
}
}