{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4044992923736572,
"min": 1.4044992923736572,
"max": 1.4283496141433716,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71925.8125,
"min": 68109.9765625,
"max": 76183.65625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 87.3286219081272,
"min": 83.72100840336134,
"max": 387.74418604651163,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49428.0,
"min": 48811.0,
"max": 50156.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999293.0,
"min": 49549.0,
"max": 1999293.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999293.0,
"min": 49549.0,
"max": 1999293.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4217045307159424,
"min": 0.10306788235902786,
"max": 2.431812047958374,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1370.684814453125,
"min": 13.192688941955566,
"max": 1408.5982666015625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7638103355367276,
"min": 1.7698880229145288,
"max": 3.862664894030912,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2130.316649913788,
"min": 226.5456669330597,
"max": 2140.7222737669945,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7638103355367276,
"min": 1.7698880229145288,
"max": 3.862664894030912,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2130.316649913788,
"min": 226.5456669330597,
"max": 2140.7222737669945,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015190151141481086,
"min": 0.013846115457514921,
"max": 0.01988409300392959,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.030380302282962172,
"min": 0.027692230915029842,
"max": 0.057439142674168886,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05671184000869592,
"min": 0.021741592853019635,
"max": 0.06142594572156668,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.11342368001739184,
"min": 0.04348318570603927,
"max": 0.17643352039158344,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.492298502599997e-06,
"min": 4.492298502599997e-06,
"max": 0.00029535900154699997,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.984597005199993e-06,
"min": 8.984597005199993e-06,
"max": 0.0008441269686243502,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10149739999999999,
"min": 0.10149739999999999,
"max": 0.19845300000000005,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20299479999999998,
"min": 0.20299479999999998,
"max": 0.5813756500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.472025999999997e-05,
"min": 8.472025999999997e-05,
"max": 0.0049228047,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00016944051999999993,
"min": 0.00016944051999999993,
"max": 0.014070644935000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1722082487",
"python_version": "3.10.12 (main, Mar 22 2024, 16:50:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1722084933"
},
"total": 2445.433934776,
"count": 1,
"self": 0.43912209199970675,
"children": {
"run_training.setup": {
"total": 0.05769945000008647,
"count": 1,
"self": 0.05769945000008647
},
"TrainerController.start_learning": {
"total": 2444.937113234,
"count": 1,
"self": 4.400237089961593,
"children": {
"TrainerController._reset_env": {
"total": 2.808093553000049,
"count": 1,
"self": 2.808093553000049
},
"TrainerController.advance": {
"total": 2437.6094777040385,
"count": 231714,
"self": 4.790148314107682,
"children": {
"env_step": {
"total": 1936.4896108719267,
"count": 231714,
"self": 1596.1095719680568,
"children": {
"SubprocessEnvManager._take_step": {
"total": 337.4519795350201,
"count": 231714,
"self": 17.202354210098406,
"children": {
"TorchPolicy.evaluate": {
"total": 320.2496253249217,
"count": 222963,
"self": 320.2496253249217
}
}
},
"workers": {
"total": 2.9280593688498584,
"count": 231714,
"self": 0.0,
"children": {
"worker_root": {
"total": 2437.4626540459903,
"count": 231714,
"is_parallel": true,
"self": 1151.7740427191516,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009962980000182142,
"count": 1,
"is_parallel": true,
"self": 0.00028490700003658276,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007113909999816315,
"count": 2,
"is_parallel": true,
"self": 0.0007113909999816315
}
}
},
"UnityEnvironment.step": {
"total": 0.04562519700004941,
"count": 1,
"is_parallel": true,
"self": 0.0004560899999432877,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022575600007712637,
"count": 1,
"is_parallel": true,
"self": 0.00022575600007712637
},
"communicator.exchange": {
"total": 0.04421913700002733,
"count": 1,
"is_parallel": true,
"self": 0.04421913700002733
},
"steps_from_proto": {
"total": 0.0007242140000016661,
"count": 1,
"is_parallel": true,
"self": 0.00022469900000032794,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004995150000013382,
"count": 2,
"is_parallel": true,
"self": 0.0004995150000013382
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1285.6886113268388,
"count": 231713,
"is_parallel": true,
"self": 39.350458132913445,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.50674252996453,
"count": 231713,
"is_parallel": true,
"self": 83.50674252996453
},
"communicator.exchange": {
"total": 1070.5197323309326,
"count": 231713,
"is_parallel": true,
"self": 1070.5197323309326
},
"steps_from_proto": {
"total": 92.31167833302845,
"count": 231713,
"is_parallel": true,
"self": 34.87549746313027,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.43618086989818,
"count": 463426,
"is_parallel": true,
"self": 57.43618086989818
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 496.32971851800414,
"count": 231714,
"self": 6.60591573306931,
"children": {
"process_trajectory": {
"total": 153.64122678993283,
"count": 231714,
"self": 152.39998308893303,
"children": {
"RLTrainer._checkpoint": {
"total": 1.241243700999803,
"count": 10,
"self": 1.241243700999803
}
}
},
"_update_policy": {
"total": 336.082575995002,
"count": 96,
"self": 272.43994259399904,
"children": {
"TorchPPOOptimizer.update": {
"total": 63.64263340100297,
"count": 2880,
"self": 63.64263340100297
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.799996405490674e-07,
"count": 1,
"self": 9.799996405490674e-07
},
"TrainerController._save_models": {
"total": 0.11930390700035787,
"count": 1,
"self": 0.0018701110002439236,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11743379600011394,
"count": 1,
"self": 0.11743379600011394
}
}
}
}
}
}
}