{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.102017641067505,
"min": 2.041501998901367,
"max": 3.136293888092041,
"count": 370
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 48341.84375,
"min": 1437.2174072265625,
"max": 140688.5625,
"count": 370
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 882.4,
"min": 361.8,
"max": 999.0,
"count": 370
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 17648.0,
"min": 3996.0,
"max": 31968.0,
"count": 370
},
"SoccerTwos.Step.mean": {
"value": 4999268.0,
"min": 889928.0,
"max": 4999268.0,
"count": 412
},
"SoccerTwos.Step.sum": {
"value": 4999268.0,
"min": 889928.0,
"max": 4999268.0,
"count": 412
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.001689090859144926,
"min": -0.01103414036333561,
"max": 0.021417981013655663,
"count": 412
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.0185799989849329,
"min": -0.12137554585933685,
"max": 0.21417981386184692,
"count": 412
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.001865422003902495,
"min": -0.008956444449722767,
"max": 0.014903930947184563,
"count": 412
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.020519642159342766,
"min": -0.08956444263458252,
"max": 0.14903931319713593,
"count": 412
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 412
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 412
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.1060727292841131,
"min": -0.36363636363636365,
"max": 0.1495636376467618,
"count": 412
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 1.1668000221252441,
"min": -4.702000021934509,
"max": 2.2516000270843506,
"count": 412
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.1060727292841131,
"min": -0.36363636363636365,
"max": 0.1495636376467618,
"count": 412
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 1.1668000221252441,
"min": -4.702000021934509,
"max": 2.2516000270843506,
"count": 412
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 412
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 412
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.020040803001029416,
"min": 0.011714485075208359,
"max": 0.02307238254612053,
"count": 161
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.020040803001029416,
"min": 0.011714485075208359,
"max": 0.02307238254612053,
"count": 161
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 7.547141746423828e-05,
"min": 7.365118520002372e-09,
"max": 0.004154800864246984,
"count": 161
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 7.547141746423828e-05,
"min": 7.365118520002372e-09,
"max": 0.004154800864246984,
"count": 161
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 7.55631832362269e-05,
"min": 1.148362090851328e-08,
"max": 0.004176822351291775,
"count": 161
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 7.55631832362269e-05,
"min": 1.148362090851328e-08,
"max": 0.004176822351291775,
"count": 161
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 161
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 161
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999993,
"max": 0.20000000000000007,
"count": 161
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999993,
"max": 0.20000000000000007,
"count": 161
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 161
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 161
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1202.9464461465905,
"min": 1194.2669302859135,
"max": 1202.9464461465905,
"count": 77
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2405.892892293181,
"min": 2388.533860571827,
"max": 11993.45423655682,
"count": 77
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682547728",
"python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\Usuario\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.0+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1682556315"
},
"total": 8587.9091326,
"count": 1,
"self": 0.1926858999995602,
"children": {
"run_training.setup": {
"total": 0.07200840000000008,
"count": 1,
"self": 0.07200840000000008
},
"TrainerController.start_learning": {
"total": 8587.6444383,
"count": 1,
"self": 3.1742184997965524,
"children": {
"TrainerController._reset_env": {
"total": 3.429319399999788,
"count": 22,
"self": 3.429319399999788
},
"TrainerController.advance": {
"total": 8580.943845500202,
"count": 264991,
"self": 3.1068143002830766,
"children": {
"env_step": {
"total": 2837.7315885999305,
"count": 264991,
"self": 2184.696096199588,
"children": {
"SubprocessEnvManager._take_step": {
"total": 650.9614059002138,
"count": 264991,
"self": 21.13272529962228,
"children": {
"TorchPolicy.evaluate": {
"total": 629.8286806005915,
"count": 527778,
"self": 629.8286806005915
}
}
},
"workers": {
"total": 2.0740865001289484,
"count": 264991,
"self": 0.0,
"children": {
"worker_root": {
"total": 8580.296777699983,
"count": 264991,
"is_parallel": true,
"self": 6847.639169299801,
"children": {
"steps_from_proto": {
"total": 0.02831589999823514,
"count": 44,
"is_parallel": true,
"self": 0.005754299997616119,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.02256160000061902,
"count": 176,
"is_parallel": true,
"self": 0.02256160000061902
}
}
},
"UnityEnvironment.step": {
"total": 1732.6292925001835,
"count": 264991,
"is_parallel": true,
"self": 92.05950100037671,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.91232160010361,
"count": 264991,
"is_parallel": true,
"self": 80.91232160010361
},
"communicator.exchange": {
"total": 1269.1824767999185,
"count": 264991,
"is_parallel": true,
"self": 1269.1824767999185
},
"steps_from_proto": {
"total": 290.47499309978446,
"count": 529982,
"is_parallel": true,
"self": 55.97875599981586,
"children": {
"_process_rank_one_or_two_observation": {
"total": 234.4962370999686,
"count": 2119928,
"is_parallel": true,
"self": 234.4962370999686
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 5740.105442599988,
"count": 264991,
"self": 33.02598739999121,
"children": {
"process_trajectory": {
"total": 665.0356726999944,
"count": 264991,
"self": 664.1719426999931,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8637300000013113,
"count": 9,
"self": 0.8637300000013113
}
}
},
"_update_policy": {
"total": 5042.043782500003,
"count": 161,
"self": 463.78144229999,
"children": {
"TorchPOCAOptimizer.update": {
"total": 4578.262340200013,
"count": 5748,
"self": 4578.262340200013
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.999998731771484e-07,
"count": 1,
"self": 6.999998731771484e-07
},
"TrainerController._save_models": {
"total": 0.09705420000136655,
"count": 1,
"self": 0.006680700000288198,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09037350000107836,
"count": 1,
"self": 0.09037350000107836
}
}
}
}
}
}
}