{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4050054550170898,
"min": 1.4050054550170898,
"max": 1.426466941833496,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71059.5546875,
"min": 68346.40625,
"max": 77517.46875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 90.17304189435337,
"min": 83.29005059021922,
"max": 396.94444444444446,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49505.0,
"min": 49008.0,
"max": 50116.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999990.0,
"min": 49538.0,
"max": 1999990.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999990.0,
"min": 49538.0,
"max": 1999990.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4223861694335938,
"min": -0.013070471584796906,
"max": 2.47235107421875,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1329.8900146484375,
"min": -1.633808970451355,
"max": 1441.230224609375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.673060900743759,
"min": 1.835397591114044,
"max": 3.904925320413895,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2016.5104345083237,
"min": 229.42469888925552,
"max": 2231.667940735817,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.673060900743759,
"min": 1.835397591114044,
"max": 3.904925320413895,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2016.5104345083237,
"min": 229.42469888925552,
"max": 2231.667940735817,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015100820154637202,
"min": 0.012926023934051045,
"max": 0.020140935306618406,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.045302460463911606,
"min": 0.02585204786810209,
"max": 0.060422805919855216,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0544252404736148,
"min": 0.023119274775187174,
"max": 0.06044914765904347,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1632757214208444,
"min": 0.04623854955037435,
"max": 0.17867978376646837,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.2009989330333284e-06,
"min": 3.2009989330333284e-06,
"max": 0.000295292026569325,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.602996799099985e-06,
"min": 9.602996799099985e-06,
"max": 0.00084390826869725,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10106696666666666,
"min": 0.10106696666666666,
"max": 0.198430675,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3032009,
"min": 0.20729844999999994,
"max": 0.5813027500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.324163666666661e-05,
"min": 6.324163666666661e-05,
"max": 0.004921690682500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018972490999999983,
"min": 0.00018972490999999983,
"max": 0.014067007225,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674399470",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674401596"
},
"total": 2125.470415857,
"count": 1,
"self": 0.39231266299975687,
"children": {
"run_training.setup": {
"total": 0.11053012399997897,
"count": 1,
"self": 0.11053012399997897
},
"TrainerController.start_learning": {
"total": 2124.96757307,
"count": 1,
"self": 3.7658200860582838,
"children": {
"TrainerController._reset_env": {
"total": 10.221566528999972,
"count": 1,
"self": 10.221566528999972
},
"TrainerController.advance": {
"total": 2110.8687937959417,
"count": 232302,
"self": 3.9119652659719577,
"children": {
"env_step": {
"total": 1661.0563144039554,
"count": 232302,
"self": 1394.0096496128915,
"children": {
"SubprocessEnvManager._take_step": {
"total": 264.6021578470515,
"count": 232302,
"self": 13.66832179706114,
"children": {
"TorchPolicy.evaluate": {
"total": 250.93383604999036,
"count": 223024,
"self": 63.34719635904014,
"children": {
"TorchPolicy.sample_actions": {
"total": 187.58663969095022,
"count": 223024,
"self": 187.58663969095022
}
}
}
}
},
"workers": {
"total": 2.44450694401246,
"count": 232302,
"self": 0.0,
"children": {
"worker_root": {
"total": 2117.183505395948,
"count": 232302,
"is_parallel": true,
"self": 970.1867642459383,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005973197999992408,
"count": 1,
"is_parallel": true,
"self": 0.0003451019999829441,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.005628096000009464,
"count": 2,
"is_parallel": true,
"self": 0.005628096000009464
}
}
},
"UnityEnvironment.step": {
"total": 0.033390313000040805,
"count": 1,
"is_parallel": true,
"self": 0.0003062030000364757,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017908899997109984,
"count": 1,
"is_parallel": true,
"self": 0.00017908899997109984
},
"communicator.exchange": {
"total": 0.032198567000023104,
"count": 1,
"is_parallel": true,
"self": 0.032198567000023104
},
"steps_from_proto": {
"total": 0.000706454000010126,
"count": 1,
"is_parallel": true,
"self": 0.00024187399992570136,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004645800000844247,
"count": 2,
"is_parallel": true,
"self": 0.0004645800000844247
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1146.9967411500097,
"count": 232301,
"is_parallel": true,
"self": 33.162797501857995,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 72.02616308098362,
"count": 232301,
"is_parallel": true,
"self": 72.02616308098362
},
"communicator.exchange": {
"total": 954.3269638700891,
"count": 232301,
"is_parallel": true,
"self": 954.3269638700891
},
"steps_from_proto": {
"total": 87.48081669707904,
"count": 232301,
"is_parallel": true,
"self": 35.66905082307346,
"children": {
"_process_rank_one_or_two_observation": {
"total": 51.81176587400557,
"count": 464602,
"is_parallel": true,
"self": 51.81176587400557
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 445.9005141260141,
"count": 232302,
"self": 5.970040178974216,
"children": {
"process_trajectory": {
"total": 140.35670070004,
"count": 232302,
"self": 139.2322953900396,
"children": {
"RLTrainer._checkpoint": {
"total": 1.124405310000384,
"count": 10,
"self": 1.124405310000384
}
}
},
"_update_policy": {
"total": 299.5737732469999,
"count": 97,
"self": 246.70307008600543,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.87070316099448,
"count": 2910,
"self": 52.87070316099448
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.930001058615744e-07,
"count": 1,
"self": 8.930001058615744e-07
},
"TrainerController._save_models": {
"total": 0.111391765999997,
"count": 1,
"self": 0.0020617179998225765,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10933004800017443,
"count": 1,
"self": 0.10933004800017443
}
}
}
}
}
}
}