{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4061193466186523,
"min": 1.4061193466186523,
"max": 1.4269922971725464,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70684.2109375,
"min": 68727.21875,
"max": 77591.640625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 91.1268656716418,
"min": 76.50154798761609,
"max": 396.3937007874016,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48844.0,
"min": 48844.0,
"max": 50342.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999911.0,
"min": 49719.0,
"max": 1999911.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999911.0,
"min": 49719.0,
"max": 1999911.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4395267963409424,
"min": 0.11863066256046295,
"max": 2.506622552871704,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1307.58642578125,
"min": 14.947463035583496,
"max": 1579.558837890625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.713567373134307,
"min": 1.7764158239440313,
"max": 3.980026124779411,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1990.4721119999886,
"min": 223.82839381694794,
"max": 2468.954322040081,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.713567373134307,
"min": 1.7764158239440313,
"max": 3.980026124779411,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1990.4721119999886,
"min": 223.82839381694794,
"max": 2468.954322040081,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014622876138633325,
"min": 0.011970390451218312,
"max": 0.0193470511760097,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.043868628415899974,
"min": 0.023940780902436624,
"max": 0.055876776207393654,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05402226179010339,
"min": 0.022522084570179384,
"max": 0.06194113016956382,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16206678537031016,
"min": 0.04504416914035877,
"max": 0.18582339050869146,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5907488031166727e-06,
"min": 3.5907488031166727e-06,
"max": 0.000295281376572875,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0772246409350018e-05,
"min": 1.0772246409350018e-05,
"max": 0.0008438596687134498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1011968833333333,
"min": 0.1011968833333333,
"max": 0.19842712499999995,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3035906499999999,
"min": 0.20752595000000007,
"max": 0.5812865500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.972447833333344e-05,
"min": 6.972447833333344e-05,
"max": 0.004921513537500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020917343500000032,
"min": 0.00020917343500000032,
"max": 0.014066198844999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670536171",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670538406"
},
"total": 2234.8896655989997,
"count": 1,
"self": 0.4124267949996465,
"children": {
"run_training.setup": {
"total": 0.10632406899998159,
"count": 1,
"self": 0.10632406899998159
},
"TrainerController.start_learning": {
"total": 2234.370914735,
"count": 1,
"self": 3.8463366140013022,
"children": {
"TrainerController._reset_env": {
"total": 10.500289825000038,
"count": 1,
"self": 10.500289825000038
},
"TrainerController.advance": {
"total": 2219.9109878359986,
"count": 232921,
"self": 4.07788950596705,
"children": {
"env_step": {
"total": 1730.911766980011,
"count": 232921,
"self": 1447.1588872611055,
"children": {
"SubprocessEnvManager._take_step": {
"total": 281.19332535193456,
"count": 232921,
"self": 14.21985231590844,
"children": {
"TorchPolicy.evaluate": {
"total": 266.9734730360261,
"count": 223070,
"self": 66.37330594301187,
"children": {
"TorchPolicy.sample_actions": {
"total": 200.60016709301425,
"count": 223070,
"self": 200.60016709301425
}
}
}
}
},
"workers": {
"total": 2.559554366970815,
"count": 232921,
"self": 0.0,
"children": {
"worker_root": {
"total": 2226.527294033086,
"count": 232921,
"is_parallel": true,
"self": 1043.127025545959,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020654690000014853,
"count": 1,
"is_parallel": true,
"self": 0.00031901800002742675,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017464509999740585,
"count": 2,
"is_parallel": true,
"self": 0.0017464509999740585
}
}
},
"UnityEnvironment.step": {
"total": 0.025356830999953672,
"count": 1,
"is_parallel": true,
"self": 0.0002586269998801072,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001807620000136012,
"count": 1,
"is_parallel": true,
"self": 0.0001807620000136012
},
"communicator.exchange": {
"total": 0.024237574000039785,
"count": 1,
"is_parallel": true,
"self": 0.024237574000039785
},
"steps_from_proto": {
"total": 0.0006798680000201784,
"count": 1,
"is_parallel": true,
"self": 0.00023272199996426934,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004471460000559091,
"count": 2,
"is_parallel": true,
"self": 0.0004471460000559091
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1183.400268487127,
"count": 232920,
"is_parallel": true,
"self": 34.141360162210276,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.0830391429551,
"count": 232920,
"is_parallel": true,
"self": 75.0830391429551
},
"communicator.exchange": {
"total": 983.4649145569915,
"count": 232920,
"is_parallel": true,
"self": 983.4649145569915
},
"steps_from_proto": {
"total": 90.7109546249701,
"count": 232920,
"is_parallel": true,
"self": 37.36690378096711,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.34405084400299,
"count": 465840,
"is_parallel": true,
"self": 53.34405084400299
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 484.9213313500208,
"count": 232921,
"self": 5.96053100809695,
"children": {
"process_trajectory": {
"total": 151.90554556392465,
"count": 232921,
"self": 151.3885789609244,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5169666030002418,
"count": 4,
"self": 0.5169666030002418
}
}
},
"_update_policy": {
"total": 327.0552547779992,
"count": 97,
"self": 272.13421096100063,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.921043816998576,
"count": 2910,
"self": 54.921043816998576
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.560000424040481e-07,
"count": 1,
"self": 8.560000424040481e-07
},
"TrainerController._save_models": {
"total": 0.11329960400007622,
"count": 1,
"self": 0.0019135500001539185,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1113860539999223,
"count": 1,
"self": 0.1113860539999223
}
}
}
}
}
}
}