{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1676442623138428,
"min": 3.1411008834838867,
"max": 3.2957351207733154,
"count": 232
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 70448.40625,
"min": 22155.142578125,
"max": 156177.34375,
"count": 232
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 903.0,
"min": 416.45454545454544,
"max": 999.0,
"count": 232
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 21672.0,
"min": 12460.0,
"max": 27804.0,
"count": 232
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1198.5690931650881,
"min": 1193.063294954257,
"max": 1207.1904214672236,
"count": 138
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2397.1381863301763,
"min": 2386.126589908514,
"max": 12030.76499202371,
"count": 138
},
"SoccerTwos.Step.mean": {
"value": 2319182.0,
"min": 9918.0,
"max": 2319182.0,
"count": 232
},
"SoccerTwos.Step.sum": {
"value": 2319182.0,
"min": 9918.0,
"max": 2319182.0,
"count": 232
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.0018293745815753937,
"min": -0.09339082986116409,
"max": 0.012058364227414131,
"count": 232
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.02012312039732933,
"min": -1.400862455368042,
"max": 0.12091680616140366,
"count": 232
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0014848989667370915,
"min": -0.09344816207885742,
"max": 0.011044774204492569,
"count": 232
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.016333889216184616,
"min": -1.401706337928772,
"max": 0.13135787844657898,
"count": 232
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 232
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 232
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.10487272522666237,
"min": -0.5333333333333333,
"max": 0.3658933281898499,
"count": 232
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 1.1535999774932861,
"min": -8.0,
"max": 5.488399922847748,
"count": 232
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.10487272522666237,
"min": -0.5333333333333333,
"max": 0.3658933281898499,
"count": 232
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 1.1535999774932861,
"min": -8.0,
"max": 5.488399922847748,
"count": 232
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 232
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 232
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015955663394803803,
"min": 0.00886147110334908,
"max": 0.02335223377837489,
"count": 107
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015955663394803803,
"min": 0.00886147110334908,
"max": 0.02335223377837489,
"count": 107
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0008029293671521979,
"min": 1.882866570213082e-07,
"max": 0.0036716784505794447,
"count": 107
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0008029293671521979,
"min": 1.882866570213082e-07,
"max": 0.0036716784505794447,
"count": 107
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0008045330866783237,
"min": 1.8708090081531737e-07,
"max": 0.003675629124821474,
"count": 107
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0008045330866783237,
"min": 1.8708090081531737e-07,
"max": 0.003675629124821474,
"count": 107
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 107
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 107
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 107
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 107
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 107
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 107
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1725459457",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/poca/SoccerTwos.yaml --env=/content/training-envs-executables/linux/SoccerTwos.x86_64 --run-id=SoccerTwos2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1725464582"
},
"total": 5124.486239383001,
"count": 1,
"self": 0.23174353300055373,
"children": {
"run_training.setup": {
"total": 0.052237429999991036,
"count": 1,
"self": 0.052237429999991036
},
"TrainerController.start_learning": {
"total": 5124.20225842,
"count": 1,
"self": 3.4192652389720024,
"children": {
"TrainerController._reset_env": {
"total": 3.7538606070012293,
"count": 12,
"self": 3.7538606070012293
},
"TrainerController.advance": {
"total": 5116.468793763028,
"count": 151690,
"self": 3.635871973166104,
"children": {
"env_step": {
"total": 4238.218074417985,
"count": 151690,
"self": 3087.1959936429766,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1148.9534307120502,
"count": 151690,
"self": 24.7778011172677,
"children": {
"TorchPolicy.evaluate": {
"total": 1124.1756295947825,
"count": 301330,
"self": 1124.1756295947825
}
}
},
"workers": {
"total": 2.068650062957886,
"count": 151689,
"self": 0.0,
"children": {
"worker_root": {
"total": 5115.227011365101,
"count": 151689,
"is_parallel": true,
"self": 2521.6270250069565,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002925751000020682,
"count": 2,
"is_parallel": true,
"self": 0.0008306990002893144,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020950519997313677,
"count": 8,
"is_parallel": true,
"self": 0.0020950519997313677
}
}
},
"UnityEnvironment.step": {
"total": 0.039634767999928044,
"count": 1,
"is_parallel": true,
"self": 0.0011655649999511297,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008309490000328879,
"count": 1,
"is_parallel": true,
"self": 0.0008309490000328879
},
"communicator.exchange": {
"total": 0.03413032200000998,
"count": 1,
"is_parallel": true,
"self": 0.03413032200000998
},
"steps_from_proto": {
"total": 0.003507931999934044,
"count": 2,
"is_parallel": true,
"self": 0.0006255759998339272,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002882356000100117,
"count": 8,
"is_parallel": true,
"self": 0.002882356000100117
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2593.5753156231476,
"count": 151688,
"is_parallel": true,
"self": 160.897112554248,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 114.16832647205035,
"count": 151688,
"is_parallel": true,
"self": 114.16832647205035
},
"communicator.exchange": {
"total": 1800.066735114799,
"count": 151688,
"is_parallel": true,
"self": 1800.066735114799
},
"steps_from_proto": {
"total": 518.4431414820502,
"count": 303376,
"is_parallel": true,
"self": 85.9873074883626,
"children": {
"_process_rank_one_or_two_observation": {
"total": 432.45583399368763,
"count": 1213504,
"is_parallel": true,
"self": 432.45583399368763
}
}
}
}
},
"steps_from_proto": {
"total": 0.024670734997243926,
"count": 22,
"is_parallel": true,
"self": 0.005079068993381952,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.019591666003861974,
"count": 88,
"is_parallel": true,
"self": 0.019591666003861974
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 874.6148473718769,
"count": 151689,
"self": 29.26396912061398,
"children": {
"process_trajectory": {
"total": 244.21010626226007,
"count": 151689,
"self": 243.33139389226062,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8787123699994481,
"count": 4,
"self": 0.8787123699994481
}
}
},
"_update_policy": {
"total": 601.1407719890028,
"count": 107,
"self": 353.26367217101324,
"children": {
"TorchPOCAOptimizer.update": {
"total": 247.87709981798957,
"count": 3216,
"self": 247.87709981798957
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4319994079414755e-06,
"count": 1,
"self": 1.4319994079414755e-06
},
"TrainerController._save_models": {
"total": 0.560337378999975,
"count": 1,
"self": 0.006237005000002682,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5541003739999724,
"count": 1,
"self": 0.5541003739999724
}
}
}
}
}
}
}