MyreponamedLONG/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4005805253982544,
"min": 1.4005805253982544,
"max": 1.4281638860702515,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68545.8125,
"min": 67160.5234375,
"max": 79242.234375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 76.4860248447205,
"min": 73.1186943620178,
"max": 376.93283582089555,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49257.0,
"min": 48708.0,
"max": 50509.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999918.0,
"min": 49893.0,
"max": 1999918.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999918.0,
"min": 49893.0,
"max": 1999918.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4772305488586426,
"min": 0.07305292785167694,
"max": 2.562021493911743,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1595.33642578125,
"min": 9.716039657592773,
"max": 1664.73046875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.730313070349812,
"min": 1.8338867336287534,
"max": 4.031850891654231,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2402.321617305279,
"min": 243.9069355726242,
"max": 2669.233488380909,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.730313070349812,
"min": 1.8338867336287534,
"max": 4.031850891654231,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2402.321617305279,
"min": 243.9069355726242,
"max": 2669.233488380909,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017973134950847857,
"min": 0.012507035176774177,
"max": 0.021165615751912507,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05391940485254357,
"min": 0.025014070353548353,
"max": 0.062076813580157854,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.062195644982986985,
"min": 0.022471612940231957,
"max": 0.062195644982986985,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18658693494896095,
"min": 0.04494322588046391,
"max": 0.18658693494896095,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.8458487180833375e-06,
"min": 3.8458487180833375e-06,
"max": 0.00029530357656547496,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1537546154250012e-05,
"min": 1.1537546154250012e-05,
"max": 0.0008440599186466998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10128191666666665,
"min": 0.10128191666666665,
"max": 0.19843452500000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30384574999999997,
"min": 0.20769235000000008,
"max": 0.5813533000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.396764166666673e-05,
"min": 7.396764166666673e-05,
"max": 0.004921882797499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022190292500000018,
"min": 0.00022190292500000018,
"max": 0.014069529670000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1708941125",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1708943577"
},
"total": 2451.742295525,
"count": 1,
"self": 0.4454345059998559,
"children": {
"run_training.setup": {
"total": 0.04738232599993353,
"count": 1,
"self": 0.04738232599993353
},
"TrainerController.start_learning": {
"total": 2451.249478693,
"count": 1,
"self": 4.67371959501088,
"children": {
"TrainerController._reset_env": {
"total": 3.5522919909999473,
"count": 1,
"self": 3.5522919909999473
},
"TrainerController.advance": {
"total": 2442.907659734989,
"count": 233664,
"self": 4.9587129260294205,
"children": {
"env_step": {
"total": 1972.102059192921,
"count": 233664,
"self": 1640.6750518519564,
"children": {
"SubprocessEnvManager._take_step": {
"total": 328.46186083202974,
"count": 233664,
"self": 16.79490582208018,
"children": {
"TorchPolicy.evaluate": {
"total": 311.66695500994956,
"count": 222932,
"self": 311.66695500994956
}
}
},
"workers": {
"total": 2.965146508934822,
"count": 233664,
"self": 0.0,
"children": {
"worker_root": {
"total": 2443.8701504207525,
"count": 233664,
"is_parallel": true,
"self": 1107.537497732758,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009294519998093165,
"count": 1,
"is_parallel": true,
"self": 0.0002436509998915426,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006858009999177739,
"count": 2,
"is_parallel": true,
"self": 0.0006858009999177739
}
}
},
"UnityEnvironment.step": {
"total": 0.03162354900018727,
"count": 1,
"is_parallel": true,
"self": 0.0003018970000994159,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002399320001131855,
"count": 1,
"is_parallel": true,
"self": 0.0002399320001131855
},
"communicator.exchange": {
"total": 0.030351787999961743,
"count": 1,
"is_parallel": true,
"self": 0.030351787999961743
},
"steps_from_proto": {
"total": 0.0007299320000129228,
"count": 1,
"is_parallel": true,
"self": 0.00019584199981181882,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000534090000201104,
"count": 2,
"is_parallel": true,
"self": 0.000534090000201104
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1336.3326526879944,
"count": 233663,
"is_parallel": true,
"self": 41.5700427896968,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 86.09920983311963,
"count": 233663,
"is_parallel": true,
"self": 86.09920983311963
},
"communicator.exchange": {
"total": 1114.1674411281099,
"count": 233663,
"is_parallel": true,
"self": 1114.1674411281099
},
"steps_from_proto": {
"total": 94.49595893706805,
"count": 233663,
"is_parallel": true,
"self": 35.08781608924437,
"children": {
"_process_rank_one_or_two_observation": {
"total": 59.40814284782368,
"count": 467326,
"is_parallel": true,
"self": 59.40814284782368
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 465.84688761603843,
"count": 233664,
"self": 6.874924010108316,
"children": {
"process_trajectory": {
"total": 165.30195982992973,
"count": 233664,
"self": 164.0755737119298,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2263861179999367,
"count": 10,
"self": 1.2263861179999367
}
}
},
"_update_policy": {
"total": 293.6700037760004,
"count": 97,
"self": 232.94267168100782,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.72733209499256,
"count": 2910,
"self": 60.72733209499256
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.150000212481245e-07,
"count": 1,
"self": 8.150000212481245e-07
},
"TrainerController._save_models": {
"total": 0.1158065570002691,
"count": 1,
"self": 0.0019487290005599789,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11385782799970912,
"count": 1,
"self": 0.11385782799970912
}
}
}
}
}
}
}
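
Below is a minimal sketch (not part of the original file) of one way to inspect this timers.json with only the Python standard library: it prints each gauge's final value and observed range, then walks the nested timer tree of total/count/self/children entries. The relative path "MyreponamedLONG/run_logs/timers.json" is assumed from the repository layout above; adjust it to wherever the file lives locally.

import json

# Load the ML-Agents run log (path is an assumption, see note above).
with open("MyreponamedLONG/run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records the latest value plus min/max/count over the run.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# Recursively print cumulative wall-clock seconds for each timed block.
def walk(node, label="root", depth=0):
    total = node.get("total")
    if total is not None:
        print(f"{'  ' * depth}{label}: {total:.3f}s over {node.get('count', 0)} call(s)")
    for child_label, child in node.get("children", {}).items():
        walk(child, child_label, depth + 1)

walk(timers)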