{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4059650897979736,
"min": 1.4059650897979736,
"max": 1.426599383354187,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69872.25,
"min": 67588.8984375,
"max": 77409.015625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 75.50611620795107,
"min": 74.77389984825493,
"max": 393.7421875,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49381.0,
"min": 49242.0,
"max": 50399.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999917.0,
"min": 49854.0,
"max": 1999917.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999917.0,
"min": 49854.0,
"max": 1999917.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4681060314178467,
"min": 0.016926603391766548,
"max": 2.551644802093506,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1614.141357421875,
"min": 2.1496787071228027,
"max": 1646.7947998046875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8020241330705287,
"min": 1.7957424251582679,
"max": 4.000807588699304,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2486.5237830281258,
"min": 228.05928799510002,
"max": 2532.51120364666,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8020241330705287,
"min": 1.7957424251582679,
"max": 4.000807588699304,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2486.5237830281258,
"min": 228.05928799510002,
"max": 2532.51120364666,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014661965017957199,
"min": 0.014280411827636879,
"max": 0.019650822441326453,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.0439858950538716,
"min": 0.028560823655273758,
"max": 0.05445032534286535,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.060951405556665526,
"min": 0.02243422040094932,
"max": 0.060951405556665526,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18285421666999657,
"min": 0.04486844080189864,
"max": 0.18285421666999657,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.886598704500004e-06,
"min": 3.886598704500004e-06,
"max": 0.00029528025157324995,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1659796113500012e-05,
"min": 1.1659796113500012e-05,
"max": 0.00084387616870795,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10129549999999998,
"min": 0.10129549999999998,
"max": 0.19842674999999999,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30388649999999995,
"min": 0.20772110000000005,
"max": 0.5812920500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.464545000000009e-05,
"min": 7.464545000000009e-05,
"max": 0.004921494825000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022393635000000025,
"min": 0.00022393635000000025,
"max": 0.014066473295000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671130337",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671132544"
},
"total": 2207.198799843,
"count": 1,
"self": 0.43960382099976414,
"children": {
"run_training.setup": {
"total": 0.1162150849999648,
"count": 1,
"self": 0.1162150849999648
},
"TrainerController.start_learning": {
"total": 2206.642980937,
"count": 1,
"self": 3.8673666589993445,
"children": {
"TrainerController._reset_env": {
"total": 10.35351657800004,
"count": 1,
"self": 10.35351657800004
},
"TrainerController.advance": {
"total": 2192.3119086660013,
"count": 233364,
"self": 4.058690985916201,
"children": {
"env_step": {
"total": 1722.5435091320517,
"count": 233364,
"self": 1439.6619330170122,
"children": {
"SubprocessEnvManager._take_step": {
"total": 280.30202687398844,
"count": 233364,
"self": 14.227833400920815,
"children": {
"TorchPolicy.evaluate": {
"total": 266.0741934730676,
"count": 222936,
"self": 66.76368137296078,
"children": {
"TorchPolicy.sample_actions": {
"total": 199.31051210010685,
"count": 222936,
"self": 199.31051210010685
}
}
}
}
},
"workers": {
"total": 2.5795492410510974,
"count": 233364,
"self": 0.0,
"children": {
"worker_root": {
"total": 2198.829453913908,
"count": 233364,
"is_parallel": true,
"self": 1019.0282172398599,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00212498799999139,
"count": 1,
"is_parallel": true,
"self": 0.000326005999966128,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001798982000025262,
"count": 2,
"is_parallel": true,
"self": 0.001798982000025262
}
}
},
"UnityEnvironment.step": {
"total": 0.02732715700000199,
"count": 1,
"is_parallel": true,
"self": 0.0002595580000388509,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002532739999878686,
"count": 1,
"is_parallel": true,
"self": 0.0002532739999878686
},
"communicator.exchange": {
"total": 0.026082245999987208,
"count": 1,
"is_parallel": true,
"self": 0.026082245999987208
},
"steps_from_proto": {
"total": 0.0007320789999880617,
"count": 1,
"is_parallel": true,
"self": 0.0002609310000138976,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004711479999741641,
"count": 2,
"is_parallel": true,
"self": 0.0004711479999741641
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1179.801236674048,
"count": 233363,
"is_parallel": true,
"self": 34.15218985102047,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.52464205793194,
"count": 233363,
"is_parallel": true,
"self": 77.52464205793194
},
"communicator.exchange": {
"total": 974.3620318890507,
"count": 233363,
"is_parallel": true,
"self": 974.3620318890507
},
"steps_from_proto": {
"total": 93.76237287604471,
"count": 233363,
"is_parallel": true,
"self": 40.623474592052446,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.13889828399226,
"count": 466726,
"is_parallel": true,
"self": 53.13889828399226
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 465.7097085480333,
"count": 233364,
"self": 5.881519831075877,
"children": {
"process_trajectory": {
"total": 150.52629407995698,
"count": 233364,
"self": 149.38582835695706,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1404657229999202,
"count": 10,
"self": 1.1404657229999202
}
}
},
"_update_policy": {
"total": 309.30189463700043,
"count": 97,
"self": 256.40232778099084,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.899566856009585,
"count": 2910,
"self": 52.899566856009585
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.319998414663132e-07,
"count": 1,
"self": 8.319998414663132e-07
},
"TrainerController._save_models": {
"total": 0.11018820199979018,
"count": 1,
"self": 0.0019271439996373374,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10826105800015284,
"count": 1,
"self": 0.10826105800015284
}
}
}
}
}
}
}