poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.6255717277526855,
"min": 2.0630123615264893,
"max": 3.2957704067230225,
"count": 722
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 33943.390625,
"min": 13144.6884765625,
"max": 120854.34375,
"count": 722
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 423.8333333333333,
"max": 999.0,
"count": 722
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 10172.0,
"max": 31324.0,
"count": 722
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1195.7372710068555,
"min": 1195.7372710068555,
"max": 1209.0615775276597,
"count": 87
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4782.949084027422,
"min": 2391.4808618080865,
"max": 14464.36146440959,
"count": 87
},
"SoccerTwos.Step.mean": {
"value": 7219028.0,
"min": 9678.0,
"max": 7219028.0,
"count": 722
},
"SoccerTwos.Step.sum": {
"value": 7219028.0,
"min": 9678.0,
"max": 7219028.0,
"count": 722
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.002083804691210389,
"min": -0.035258762538433075,
"max": 0.04194639250636101,
"count": 722
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.020838046446442604,
"min": -0.599292516708374,
"max": 0.4194639325141907,
"count": 722
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.0018059492576867342,
"min": -0.03531572222709656,
"max": 0.029078194871544838,
"count": 722
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.018059492111206055,
"min": -0.6001506447792053,
"max": 0.2907819449901581,
"count": 722
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 722
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 722
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.044679999351501465,
"min": -0.5,
"max": 0.2465666631857554,
"count": 722
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.44679999351501465,
"min": -6.0,
"max": 2.958799958229065,
"count": 722
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.044679999351501465,
"min": -0.5,
"max": 0.2465666631857554,
"count": 722
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.44679999351501465,
"min": -6.0,
"max": 2.958799958229065,
"count": 722
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 722
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 722
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016023726199637166,
"min": 0.00859238493333881,
"max": 0.02290575326284549,
"count": 330
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016023726199637166,
"min": 0.00859238493333881,
"max": 0.02290575326284549,
"count": 330
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0025711507769301534,
"min": 3.3094705494581454e-10,
"max": 0.0053011750729638155,
"count": 330
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0025711507769301534,
"min": 3.3094705494581454e-10,
"max": 0.0053011750729638155,
"count": 330
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.002575425039200733,
"min": 2.352361768339269e-10,
"max": 0.0052358094952069225,
"count": 330
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.002575425039200733,
"min": 2.352361768339269e-10,
"max": 0.0052358094952069225,
"count": 330
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 330
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 330
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 330
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 330
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 330
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 330
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1716391090",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\RAY\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1716409918"
},
"total": 18827.914496999932,
"count": 1,
"self": 10.012552099884488,
"children": {
"run_training.setup": {
"total": 0.2577732000499964,
"count": 1,
"self": 0.2577732000499964
},
"TrainerController.start_learning": {
"total": 18817.644171699998,
"count": 1,
"self": 12.035114099970087,
"children": {
"TrainerController._reset_env": {
"total": 32.076831100159325,
"count": 36,
"self": 32.076831100159325
},
"TrainerController.advance": {
"total": 18773.356785099953,
"count": 469309,
"self": 10.246123759192415,
"children": {
"env_step": {
"total": 8385.282620415208,
"count": 469309,
"self": 6575.170129238279,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1802.591238191002,
"count": 469309,
"self": 58.34078842587769,
"children": {
"TorchPolicy.evaluate": {
"total": 1744.2504497651244,
"count": 932452,
"self": 1744.2504497651244
}
}
},
"workers": {
"total": 7.521252985927276,
"count": 469308,
"self": 0.0,
"children": {
"worker_root": {
"total": 18776.14601170807,
"count": 469308,
"is_parallel": true,
"self": 13706.473688248429,
"children": {
"steps_from_proto": {
"total": 0.0795829005073756,
"count": 72,
"is_parallel": true,
"self": 0.01388260128442198,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.06570029922295362,
"count": 288,
"is_parallel": true,
"self": 0.06570029922295362
}
}
},
"UnityEnvironment.step": {
"total": 5069.592740559136,
"count": 469308,
"is_parallel": true,
"self": 294.45154071133584,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 268.2372855601134,
"count": 469308,
"is_parallel": true,
"self": 268.2372855601134
},
"communicator.exchange": {
"total": 3575.3558380041504,
"count": 469308,
"is_parallel": true,
"self": 3575.3558380041504
},
"steps_from_proto": {
"total": 931.5480762835359,
"count": 938616,
"is_parallel": true,
"self": 178.8616694076918,
"children": {
"_process_rank_one_or_two_observation": {
"total": 752.6864068758441,
"count": 3754464,
"is_parallel": true,
"self": 752.6864068758441
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 10377.828040925553,
"count": 469308,
"self": 108.16009088198189,
"children": {
"process_trajectory": {
"total": 1403.0475844438188,
"count": 469308,
"self": 1401.1869686435675,
"children": {
"RLTrainer._checkpoint": {
"total": 1.860615800251253,
"count": 14,
"self": 1.860615800251253
}
}
},
"_update_policy": {
"total": 8866.620365599752,
"count": 331,
"self": 1083.231060908176,
"children": {
"TorchPOCAOptimizer.update": {
"total": 7783.389304691576,
"count": 9933,
"self": 7783.389304691576
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.00001522898674e-06,
"count": 1,
"self": 2.00001522898674e-06
},
"TrainerController._save_models": {
"total": 0.17543939989991486,
"count": 1,
"self": 0.041915099951438606,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13352429994847625,
"count": 1,
"self": 0.13352429994847625
}
}
}
}
}
}
}
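
A minimal sketch, assuming the file above is read back as run_logs/timers.json with Python's standard json module, of how the gauge statistics and the top-level timer total could be inspected:

```python
import json

# Load the ML-Agents timer/gauge log (path assumed relative to the repo root).
with open("run_logs/timers.json") as f:
    log = json.load(f)

# Each gauge stores its most recent value plus min, max, and update count.
for name, gauge in log["gauges"].items():
    print(f"{name}: value={gauge['value']:.4g} "
          f"(min={gauge['min']:.4g}, max={gauge['max']:.4g}, n={gauge['count']})")

# Timers nest child timers under "children"; all totals are in seconds.
print("total training wall-clock (s):", log["total"])
```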