{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4026494026184082,
"min": 1.4026494026184082,
"max": 1.4272713661193848,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70978.265625,
"min": 68758.8671875,
"max": 78664.953125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 84.61986301369863,
"min": 83.82462057335582,
"max": 384.48091603053433,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49418.0,
"min": 49129.0,
"max": 50367.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999953.0,
"min": 49748.0,
"max": 1999953.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999953.0,
"min": 49748.0,
"max": 1999953.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3710224628448486,
"min": 0.051666051149368286,
"max": 2.452930212020874,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1384.6771240234375,
"min": 6.716586589813232,
"max": 1435.35888671875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6579249703312575,
"min": 1.8194147303700448,
"max": 3.9259893586623527,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2136.2281826734543,
"min": 236.5239149481058,
"max": 2328.111689686775,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6579249703312575,
"min": 1.8194147303700448,
"max": 3.9259893586623527,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2136.2281826734543,
"min": 236.5239149481058,
"max": 2328.111689686775,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01803078762928231,
"min": 0.01419157469451117,
"max": 0.021592735803763693,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.054092362887846926,
"min": 0.02838314938902234,
"max": 0.056129414393399205,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05417680959734652,
"min": 0.023246020389099918,
"max": 0.061953774177365833,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16253042879203955,
"min": 0.046492040778199836,
"max": 0.1858613225320975,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.421748859450009e-06,
"min": 3.421748859450009e-06,
"max": 0.00029535135154955,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0265246578350026e-05,
"min": 1.0265246578350026e-05,
"max": 0.00084429706856765,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10114055,
"min": 0.10114055,
"max": 0.19845045000000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30342165,
"min": 0.20741350000000008,
"max": 0.58143235,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.691344500000016e-05,
"min": 6.691344500000016e-05,
"max": 0.004922677455,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020074033500000048,
"min": 0.00020074033500000048,
"max": 0.014073474264999997,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677072245",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1677074696"
},
"total": 2450.928094976,
"count": 1,
"self": 0.437480510000114,
"children": {
"run_training.setup": {
"total": 0.1171684670000559,
"count": 1,
"self": 0.1171684670000559
},
"TrainerController.start_learning": {
"total": 2450.373445999,
"count": 1,
"self": 4.636089918984453,
"children": {
"TrainerController._reset_env": {
"total": 10.563923882999916,
"count": 1,
"self": 10.563923882999916
},
"TrainerController.advance": {
"total": 2435.0598067010155,
"count": 232212,
"self": 4.973974776872183,
"children": {
"env_step": {
"total": 1907.522884557066,
"count": 232212,
"self": 1590.620015143071,
"children": {
"SubprocessEnvManager._take_step": {
"total": 314.0034171329795,
"count": 232212,
"self": 16.4659212199374,
"children": {
"TorchPolicy.evaluate": {
"total": 297.5374959130421,
"count": 222980,
"self": 73.39676506896683,
"children": {
"TorchPolicy.sample_actions": {
"total": 224.14073084407528,
"count": 222980,
"self": 224.14073084407528
}
}
}
}
},
"workers": {
"total": 2.899452281015442,
"count": 232212,
"self": 0.0,
"children": {
"worker_root": {
"total": 2442.0112828779497,
"count": 232212,
"is_parallel": true,
"self": 1150.9650530869114,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002256392999925083,
"count": 1,
"is_parallel": true,
"self": 0.0003967830000419781,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001859609999883105,
"count": 2,
"is_parallel": true,
"self": 0.001859609999883105
}
}
},
"UnityEnvironment.step": {
"total": 0.03179081500002212,
"count": 1,
"is_parallel": true,
"self": 0.00033575000009022915,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021701299999676849,
"count": 1,
"is_parallel": true,
"self": 0.00021701299999676849
},
"communicator.exchange": {
"total": 0.030456364999963625,
"count": 1,
"is_parallel": true,
"self": 0.030456364999963625
},
"steps_from_proto": {
"total": 0.0007816869999714982,
"count": 1,
"is_parallel": true,
"self": 0.0002796070000385953,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005020799999329029,
"count": 2,
"is_parallel": true,
"self": 0.0005020799999329029
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1291.0462297910383,
"count": 232211,
"is_parallel": true,
"self": 38.17835343186766,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.05885413602482,
"count": 232211,
"is_parallel": true,
"self": 83.05885413602482
},
"communicator.exchange": {
"total": 1070.7461917670325,
"count": 232211,
"is_parallel": true,
"self": 1070.7461917670325
},
"steps_from_proto": {
"total": 99.06283045611337,
"count": 232211,
"is_parallel": true,
"self": 40.48108953709891,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.581740919014464,
"count": 464422,
"is_parallel": true,
"self": 58.581740919014464
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 522.5629473670775,
"count": 232212,
"self": 6.79390180011967,
"children": {
"process_trajectory": {
"total": 171.0187851979598,
"count": 232212,
"self": 169.73317456996028,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2856106279995174,
"count": 10,
"self": 1.2856106279995174
}
}
},
"_update_policy": {
"total": 344.75026036899806,
"count": 97,
"self": 287.2776024819897,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.47265788700838,
"count": 2910,
"self": 57.47265788700838
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.140003385255113e-07,
"count": 1,
"self": 9.140003385255113e-07
},
"TrainerController._save_models": {
"total": 0.11362458199982939,
"count": 1,
"self": 0.002109075000134908,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11151550699969448,
"count": 1,
"self": 0.11151550699969448
}
}
}
}
}
}
}