{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4238355159759521,
"min": 1.419805645942688,
"max": 1.4298722743988037,
"count": 14
},
"Huggy.Policy.Entropy.sum": {
"value": 71146.2109375,
"min": 66331.84375,
"max": 78982.3671875,
"count": 14
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 95.46332046332046,
"min": 91.2269372693727,
"max": 391.453125,
"count": 14
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49450.0,
"min": 49445.0,
"max": 50106.0,
"count": 14
},
"Huggy.Step.mean": {
"value": 699979.0,
"min": 49823.0,
"max": 699979.0,
"count": 14
},
"Huggy.Step.sum": {
"value": 699979.0,
"min": 49823.0,
"max": 699979.0,
"count": 14
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3141157627105713,
"min": 0.15440233051776886,
"max": 2.3161802291870117,
"count": 14
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1198.7119140625,
"min": 19.60909652709961,
"max": 1255.36962890625,
"count": 14
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9000628438695517,
"min": 1.8800696136913901,
"max": 3.9540747115943606,
"count": 14
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2020.2325531244278,
"min": 238.76884093880653,
"max": 2086.554102540016,
"count": 14
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9000628438695517,
"min": 1.8800696136913901,
"max": 3.9540747115943606,
"count": 14
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2020.2325531244278,
"min": 238.76884093880653,
"max": 2086.554102540016,
"count": 14
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017955272801110467,
"min": 0.013424618318094873,
"max": 0.017955272801110467,
"count": 14
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.053865818403331406,
"min": 0.026849236636189745,
"max": 0.053865818403331406,
"count": 14
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.051895946015914275,
"min": 0.02051876625046134,
"max": 0.054778467077347966,
"count": 14
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15568783804774283,
"min": 0.04103753250092268,
"max": 0.1643354012320439,
"count": 14
},
"Huggy.Policy.LearningRate.mean": {
"value": 0.00019812363395879995,
"min": 0.00019812363395879995,
"max": 0.00029537272654242487,
"count": 14
},
"Huggy.Policy.LearningRate.sum": {
"value": 0.0005943709018763998,
"min": 0.00041172021275995003,
"max": 0.0008443492685502498,
"count": 14
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.16604120000000003,
"min": 0.16604120000000003,
"max": 0.198457575,
"count": 14
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.49812360000000006,
"min": 0.33724005,
"max": 0.5814497499999999,
"count": 14
},
"Huggy.Policy.Beta.mean": {
"value": 0.0033054558800000005,
"min": 0.0033054558800000005,
"max": 0.004923032992500001,
"count": 14
},
"Huggy.Policy.Beta.sum": {
"value": 0.009916367640000001,
"min": 0.006868278495000001,
"max": 0.014074342525,
"count": 14
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 14
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 14
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691522581",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=HuggyDog --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691523511"
},
"total": 930.2215195579993,
"count": 1,
"self": 0.43116566499975306,
"children": {
"run_training.setup": {
"total": 0.06509537500005536,
"count": 1,
"self": 0.06509537500005536
},
"TrainerController.start_learning": {
"total": 929.7252585179995,
"count": 1,
"self": 1.6601519538216962,
"children": {
"TrainerController._reset_env": {
"total": 5.591475775999243,
"count": 1,
"self": 5.591475775999243
},
"TrainerController.advance": {
"total": 922.4695631191789,
"count": 84946,
"self": 1.7502018342511292,
"children": {
"env_step": {
"total": 712.0731831270723,
"count": 84946,
"self": 600.5820600768138,
"children": {
"SubprocessEnvManager._take_step": {
"total": 110.39332339115117,
"count": 84946,
"self": 6.422089406204577,
"children": {
"TorchPolicy.evaluate": {
"total": 103.9712339849466,
"count": 82314,
"self": 103.9712339849466
}
}
},
"workers": {
"total": 1.0977996591072952,
"count": 84945,
"self": 0.0,
"children": {
"worker_root": {
"total": 927.0309405731077,
"count": 84945,
"is_parallel": true,
"self": 438.3287538581444,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009106710003834451,
"count": 1,
"is_parallel": true,
"self": 0.0002571340010035783,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006535369993798668,
"count": 2,
"is_parallel": true,
"self": 0.0006535369993798668
}
}
},
"UnityEnvironment.step": {
"total": 0.041693715999826964,
"count": 1,
"is_parallel": true,
"self": 0.0003351969990035286,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002150430000256165,
"count": 1,
"is_parallel": true,
"self": 0.0002150430000256165
},
"communicator.exchange": {
"total": 0.04035608000049251,
"count": 1,
"is_parallel": true,
"self": 0.04035608000049251
},
"steps_from_proto": {
"total": 0.0007873960003053071,
"count": 1,
"is_parallel": true,
"self": 0.00022174300102051347,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005656529992847936,
"count": 2,
"is_parallel": true,
"self": 0.0005656529992847936
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 488.7021867149633,
"count": 84944,
"is_parallel": true,
"self": 14.883015836033337,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 30.85617093287874,
"count": 84944,
"is_parallel": true,
"self": 30.85617093287874
},
"communicator.exchange": {
"total": 406.0378752730121,
"count": 84944,
"is_parallel": true,
"self": 406.0378752730121
},
"steps_from_proto": {
"total": 36.925124673039136,
"count": 84944,
"is_parallel": true,
"self": 13.196905031096321,
"children": {
"_process_rank_one_or_two_observation": {
"total": 23.728219641942815,
"count": 169888,
"is_parallel": true,
"self": 23.728219641942815
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 208.6461781578555,
"count": 84945,
"self": 2.4751856877055616,
"children": {
"process_trajectory": {
"total": 49.0173655121489,
"count": 84945,
"self": 48.58130861414884,
"children": {
"RLTrainer._checkpoint": {
"total": 0.43605689800006076,
"count": 3,
"self": 0.43605689800006076
}
}
},
"_update_policy": {
"total": 157.15362695800104,
"count": 35,
"self": 134.7881245940016,
"children": {
"TorchPPOOptimizer.update": {
"total": 22.36550236399944,
"count": 1050,
"self": 22.36550236399944
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4030001693754457e-06,
"count": 1,
"self": 1.4030001693754457e-06
},
"TrainerController._save_models": {
"total": 0.004066265999426832,
"count": 1,
"self": 2.552599926275434e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 0.004040740000164078,
"count": 1,
"self": 0.004040740000164078
}
}
}
}
}
}
}