{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4014148712158203,
"min": 1.4014148712158203,
"max": 1.4272955656051636,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70400.078125,
"min": 68547.234375,
"max": 76001.640625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 110.55429864253394,
"min": 92.9342105263158,
"max": 404.6693548387097,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48865.0,
"min": 48865.0,
"max": 50179.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999959.0,
"min": 49951.0,
"max": 1999959.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999959.0,
"min": 49951.0,
"max": 1999959.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.437720775604248,
"min": 0.07452195882797241,
"max": 2.437720775604248,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1077.4725341796875,
"min": 9.166200637817383,
"max": 1269.978759765625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8119092224949624,
"min": 1.9583585589397243,
"max": 3.890087508113639,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1684.8638763427734,
"min": 240.8781027495861,
"max": 2013.0522997379303,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8119092224949624,
"min": 1.9583585589397243,
"max": 3.890087508113639,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1684.8638763427734,
"min": 240.8781027495861,
"max": 2013.0522997379303,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016260323669929577,
"min": 0.014745389965780002,
"max": 0.02028070116357412,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04878097100978873,
"min": 0.029490779931560004,
"max": 0.0551369680154797,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0483387959914075,
"min": 0.021228618702540794,
"max": 0.055908536476393544,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1450163879742225,
"min": 0.04245723740508159,
"max": 0.16092953520516554,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.520148826650007e-06,
"min": 3.520148826650007e-06,
"max": 0.000295314226561925,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.056044647995002e-05,
"min": 1.056044647995002e-05,
"max": 0.0008441725686091498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10117335,
"min": 0.10117335,
"max": 0.198438075,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30352005,
"min": 0.20754655,
"max": 0.58139085,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.855016500000014e-05,
"min": 6.855016500000014e-05,
"max": 0.0049220599425,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020565049500000042,
"min": 0.00020565049500000042,
"max": 0.014071403415,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1730405338",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1730412505"
},
"total": 7167.2607121110195,
"count": 1,
"self": 0.9007620370248333,
"children": {
"run_training.setup": {
"total": 0.09493259000009857,
"count": 1,
"self": 0.09493259000009857
},
"TrainerController.start_learning": {
"total": 7166.265017483995,
"count": 1,
"self": 12.408496611984447,
"children": {
"TrainerController._reset_env": {
"total": 22.753301289019873,
"count": 1,
"self": 22.753301289019873
},
"TrainerController.advance": {
"total": 7130.79538634597,
"count": 231328,
"self": 12.708837991260225,
"children": {
"env_step": {
"total": 6060.412882412609,
"count": 231328,
"self": 4293.5929570726585,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1758.5567225435225,
"count": 231328,
"self": 47.698091401485726,
"children": {
"TorchPolicy.evaluate": {
"total": 1710.8586311420368,
"count": 222972,
"self": 1710.8586311420368
}
}
},
"workers": {
"total": 8.263202796428232,
"count": 231328,
"self": 0.0,
"children": {
"worker_root": {
"total": 7139.187829825649,
"count": 231328,
"is_parallel": true,
"self": 3591.911565355753,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0012272559979464859,
"count": 1,
"is_parallel": true,
"self": 0.0003475359990261495,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008797199989203364,
"count": 2,
"is_parallel": true,
"self": 0.0008797199989203364
}
}
},
"UnityEnvironment.step": {
"total": 0.04008927897666581,
"count": 1,
"is_parallel": true,
"self": 0.00035164697328582406,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005944780132267624,
"count": 1,
"is_parallel": true,
"self": 0.0005944780132267624
},
"communicator.exchange": {
"total": 0.03860907498165034,
"count": 1,
"is_parallel": true,
"self": 0.03860907498165034
},
"steps_from_proto": {
"total": 0.0005340790085028857,
"count": 1,
"is_parallel": true,
"self": 0.0001280830183532089,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004059959901496768,
"count": 2,
"is_parallel": true,
"self": 0.0004059959901496768
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3547.276264469896,
"count": 231327,
"is_parallel": true,
"self": 74.33092943520751,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 103.96591785352211,
"count": 231327,
"is_parallel": true,
"self": 103.96591785352211
},
"communicator.exchange": {
"total": 3216.4840749522264,
"count": 231327,
"is_parallel": true,
"self": 3216.4840749522264
},
"steps_from_proto": {
"total": 152.49534222894,
"count": 231327,
"is_parallel": true,
"self": 49.18278466488118,
"children": {
"_process_rank_one_or_two_observation": {
"total": 103.3125575640588,
"count": 462654,
"is_parallel": true,
"self": 103.3125575640588
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1057.6736659421003,
"count": 231328,
"self": 18.54045887934626,
"children": {
"process_trajectory": {
"total": 377.34510923858033,
"count": 231328,
"self": 374.3203545535507,
"children": {
"RLTrainer._checkpoint": {
"total": 3.0247546850296203,
"count": 10,
"self": 3.0247546850296203
}
}
},
"_update_policy": {
"total": 661.7880978241737,
"count": 97,
"self": 410.7747441549145,
"children": {
"TorchPPOOptimizer.update": {
"total": 251.01335366925923,
"count": 2910,
"self": 251.01335366925923
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.870023647323251e-06,
"count": 1,
"self": 3.870023647323251e-06
},
"TrainerController._save_models": {
"total": 0.30782936699688435,
"count": 1,
"self": 0.006050847994629294,
"children": {
"RLTrainer._checkpoint": {
"total": 0.30177851900225505,
"count": 1,
"self": 0.30177851900225505
}
}
}
}
}
}
}